repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
groovecoder/kuma | vendor/packages/logilab/astng/manager.py | 25 | 11622 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""astng manager: avoid multiple astng build of a same module when
possible by providing a class responsible to get astng representation
from various source and using a cache of built modules)
"""
__docformat__ = "restructuredtext en"
import os
from os.path import dirname, join, isdir, exists
from logilab.common.modutils import NoSourceFile, is_python_source, \
file_from_modpath, load_module_from_name, modpath_from_file, \
get_module_files, get_source_file, zipimport
from logilab.common.configuration import OptionsProviderMixIn
from logilab.astng.exceptions import ASTNGBuildingException
def astng_wrapper(func, modname):
"""wrapper to give to ASTNGManager.project_from_files"""
print 'parsing %s...' % modname
try:
return func(modname)
except ASTNGBuildingException, exc:
print exc
except Exception, exc:
import traceback
traceback.print_exc()
def _silent_no_wrap(func, modname):
"""silent wrapper that doesn't do anything; can be used for tests"""
return func(modname)
def safe_repr(obj):
try:
return repr(obj)
except:
return '???'
class ASTNGManager(OptionsProviderMixIn):
"""the astng manager, responsible to build astng from files
or modules.
Use the Borg pattern.
"""
name = 'astng loader'
options = (("ignore",
{'type' : "csv", 'metavar' : "<file>",
'dest' : "black_list", "default" : ('CVS',),
'help' : "add <file> (may be a directory) to the black list\
. It should be a base name, not a path. You may set this option multiple times\
."}),
("project",
{'default': "No Name", 'type' : 'string', 'short': 'p',
'metavar' : '<project name>',
'help' : 'set the project name.'}),
)
brain = {}
def __init__(self):
self.__dict__ = ASTNGManager.brain
if not self.__dict__:
OptionsProviderMixIn.__init__(self)
self.load_defaults()
# NOTE: cache entries are added by the [re]builder
self.astng_cache = {}
self._mod_file_cache = {}
self.transformers = []
def astng_from_file(self, filepath, modname=None, fallback=True, source=False):
"""given a module name, return the astng object"""
try:
filepath = get_source_file(filepath, include_no_ext=True)
source = True
except NoSourceFile:
pass
if modname is None:
try:
modname = '.'.join(modpath_from_file(filepath))
except ImportError:
modname = filepath
if modname in self.astng_cache:
return self.astng_cache[modname]
if source:
from logilab.astng.builder import ASTNGBuilder
return ASTNGBuilder(self).file_build(filepath, modname)
elif fallback and modname:
return self.astng_from_module_name(modname)
raise ASTNGBuildingException('unable to get astng for file %s' %
filepath)
def astng_from_module_name(self, modname, context_file=None):
"""given a module name, return the astng object"""
if modname in self.astng_cache:
return self.astng_cache[modname]
if modname == '__main__':
from logilab.astng.builder import ASTNGBuilder
return ASTNGBuilder(self).string_build('', modname)
old_cwd = os.getcwd()
if context_file:
os.chdir(dirname(context_file))
try:
filepath = self.file_from_module_name(modname, context_file)
if filepath is not None and not is_python_source(filepath):
module = self.zip_import_data(filepath)
if module is not None:
return module
if filepath is None or not is_python_source(filepath):
try:
module = load_module_from_name(modname)
except Exception, ex:
msg = 'Unable to load module %s (%s)' % (modname, ex)
raise ASTNGBuildingException(msg)
return self.astng_from_module(module, modname)
return self.astng_from_file(filepath, modname, fallback=False)
finally:
os.chdir(old_cwd)
def zip_import_data(self, filepath):
if zipimport is None:
return None
from logilab.astng.builder import ASTNGBuilder
builder = ASTNGBuilder(self)
for ext in ('.zip', '.egg'):
try:
eggpath, resource = filepath.rsplit(ext + '/', 1)
except ValueError:
continue
try:
importer = zipimport.zipimporter(eggpath + ext)
zmodname = resource.replace('/', '.')
if importer.is_package(resource):
zmodname = zmodname + '.__init__'
module = builder.string_build(importer.get_source(resource),
zmodname, filepath)
return module
except:
continue
return None
def file_from_module_name(self, modname, contextfile):
try:
value = self._mod_file_cache[(modname, contextfile)]
except KeyError:
try:
value = file_from_modpath(modname.split('.'),
context_file=contextfile)
except ImportError, ex:
msg = 'Unable to load module %s (%s)' % (modname, ex)
value = ASTNGBuildingException(msg)
self._mod_file_cache[(modname, contextfile)] = value
if isinstance(value, ASTNGBuildingException):
raise value
return value
def astng_from_module(self, module, modname=None):
"""given an imported module, return the astng object"""
modname = modname or module.__name__
if modname in self.astng_cache:
return self.astng_cache[modname]
try:
# some builtin modules don't have __file__ attribute
filepath = module.__file__
if is_python_source(filepath):
return self.astng_from_file(filepath, modname)
except AttributeError:
pass
from logilab.astng.builder import ASTNGBuilder
return ASTNGBuilder(self).module_build(module, modname)
def astng_from_class(self, klass, modname=None):
"""get astng for the given class"""
if modname is None:
try:
modname = klass.__module__
except AttributeError:
raise ASTNGBuildingException(
'Unable to get module for class %s' % safe_repr(klass))
modastng = self.astng_from_module_name(modname)
return modastng.getattr(klass.__name__)[0] # XXX
def infer_astng_from_something(self, obj, context=None):
"""infer astng for the given class"""
if hasattr(obj, '__class__') and not isinstance(obj, type):
klass = obj.__class__
else:
klass = obj
try:
modname = klass.__module__
except AttributeError:
raise ASTNGBuildingException(
'Unable to get module for %s' % safe_repr(klass))
except Exception, ex:
raise ASTNGBuildingException(
'Unexpected error while retrieving module for %s: %s'
% (safe_repr(klass), ex))
try:
name = klass.__name__
except AttributeError:
raise ASTNGBuildingException(
'Unable to get name for %s' % safe_repr(klass))
except Exception, ex:
raise ASTNGBuildingException(
'Unexpected error while retrieving name for %s: %s'
% (safe_repr(klass), ex))
# take care, on living object __module__ is regularly wrong :(
modastng = self.astng_from_module_name(modname)
if klass is obj:
for infered in modastng.igetattr(name, context):
yield infered
else:
for infered in modastng.igetattr(name, context):
yield infered.instanciate_class()
def project_from_files(self, files, func_wrapper=astng_wrapper,
project_name=None, black_list=None):
"""return a Project from a list of files or modules"""
# build the project representation
project_name = project_name or self.config.project
black_list = black_list or self.config.black_list
project = Project(project_name)
for something in files:
if not exists(something):
fpath = file_from_modpath(something.split('.'))
elif isdir(something):
fpath = join(something, '__init__.py')
else:
fpath = something
astng = func_wrapper(self.astng_from_file, fpath)
if astng is None:
continue
# XXX why is first file defining the project.path ?
project.path = project.path or astng.file
project.add_module(astng)
base_name = astng.name
# recurse in package except if __init__ was explicitly given
if astng.package and something.find('__init__') == -1:
# recurse on others packages / modules if this is a package
for fpath in get_module_files(dirname(astng.file),
black_list):
astng = func_wrapper(self.astng_from_file, fpath)
if astng is None or astng.name == base_name:
continue
project.add_module(astng)
return project
def register_transformer(self, transformer):
self.transformers.append(transformer)
class Project:
"""a project handle a set of modules / packages"""
def __init__(self, name=''):
self.name = name
self.path = None
self.modules = []
self.locals = {}
self.__getitem__ = self.locals.__getitem__
self.__iter__ = self.locals.__iter__
self.values = self.locals.values
self.keys = self.locals.keys
self.items = self.locals.items
def add_module(self, node):
self.locals[node.name] = node
self.modules.append(node)
def get_module(self, name):
return self.locals[name]
def get_children(self):
return self.modules
def __repr__(self):
return '<Project %r at %s (%s modules)>' % (self.name, id(self),
len(self.modules))
| mpl-2.0 |
gangadharkadam/office_erp | erpnext/manufacturing/doctype/bom_replace_tool/bom_replace_tool.py | 18 | 1378 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import _
from frappe.model.document import Document
class BOMReplaceTool(Document):
def replace_bom(self):
self.validate_bom()
self.update_new_bom()
bom_list = self.get_parent_boms()
updated_bom = []
for bom in bom_list:
bom_obj = frappe.get_doc("BOM", bom)
updated_bom = bom_obj.update_cost_and_exploded_items(updated_bom)
frappe.msgprint(_("BOM replaced"))
def validate_bom(self):
if cstr(self.current_bom) == cstr(self.new_bom):
frappe.throw(_("Current BOM and New BOM can not be same"))
def update_new_bom(self):
current_bom_unitcost = frappe.db.sql("""select total_cost/quantity
from `tabBOM` where name = %s""", self.current_bom)
current_bom_unitcost = current_bom_unitcost and flt(current_bom_unitcost[0][0]) or 0
frappe.db.sql("""update `tabBOM Item` set bom_no=%s,
rate=%s, amount=qty*%s where bom_no = %s and docstatus < 2""",
(self.new_bom, current_bom_unitcost, current_bom_unitcost, self.current_bom))
def get_parent_boms(self):
return [d[0] for d in frappe.db.sql("""select distinct parent
from `tabBOM Item` where ifnull(bom_no, '') = %s and docstatus < 2""",
self.new_bom)]
| agpl-3.0 |
Keleir/glances | glances/plugins/glances_mem.py | 11 | 11098 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Virtual memory plugin."""
from glances.plugins.glances_plugin import GlancesPlugin
import psutil
# SNMP OID
# Total RAM in machine: .1.3.6.1.4.1.2021.4.5.0
# Total RAM used: .1.3.6.1.4.1.2021.4.6.0
# Total RAM Free: .1.3.6.1.4.1.2021.4.11.0
# Total RAM Shared: .1.3.6.1.4.1.2021.4.13.0
# Total RAM Buffered: .1.3.6.1.4.1.2021.4.14.0
# Total Cached Memory: .1.3.6.1.4.1.2021.4.15.0
# Note: For Windows, stats are in the FS table
snmp_oid = {'default': {'total': '1.3.6.1.4.1.2021.4.5.0',
'free': '1.3.6.1.4.1.2021.4.11.0',
'shared': '1.3.6.1.4.1.2021.4.13.0',
'buffers': '1.3.6.1.4.1.2021.4.14.0',
'cached': '1.3.6.1.4.1.2021.4.15.0'},
'windows': {'mnt_point': '1.3.6.1.2.1.25.2.3.1.3',
'alloc_unit': '1.3.6.1.2.1.25.2.3.1.4',
'size': '1.3.6.1.2.1.25.2.3.1.5',
'used': '1.3.6.1.2.1.25.2.3.1.6'},
'esxi': {'mnt_point': '1.3.6.1.2.1.25.2.3.1.3',
'alloc_unit': '1.3.6.1.2.1.25.2.3.1.4',
'size': '1.3.6.1.2.1.25.2.3.1.5',
'used': '1.3.6.1.2.1.25.2.3.1.6'}}
# Define the history items list
# All items in this list will be historised if the --enable-history tag is set
# 'color' define the graph color in #RGB format
items_history_list = [{'name': 'percent', 'color': '#00FF00', 'y_unit': '%'}]
class Plugin(GlancesPlugin):
"""Glances' memory plugin.
stats is a dict
"""
def __init__(self, args=None):
"""Init the plugin."""
GlancesPlugin.__init__(self, args=args, items_history_list=items_history_list)
# We want to display the stat in the curse interface
self.display_curse = True
# Init the stats
self.reset()
def reset(self):
"""Reset/init the stats."""
self.stats = {}
@GlancesPlugin._log_result_decorator
def update(self):
"""Update RAM memory stats using the input method."""
# Reset stats
self.reset()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab MEM using the PSUtil virtual_memory method
vm_stats = psutil.virtual_memory()
# Get all the memory stats (copy/paste of the PsUtil documentation)
# total: total physical memory available.
# available: the actual amount of available memory that can be given instantly to processes that request more memory in bytes; this is calculated by summing different memory values depending on the platform (e.g. free + buffers + cached on Linux) and it is supposed to be used to monitor actual memory usage in a cross platform fashion.
# percent: the percentage usage calculated as (total - available) / total * 100.
# used: memory used, calculated differently depending on the platform and designed for informational purposes only.
# free: memory not being used at all (zeroed) that is readily available; note that this doesn’t reflect the actual memory available (use ‘available’ instead).
# Platform-specific fields:
# active: (UNIX): memory currently in use or very recently used, and so it is in RAM.
# inactive: (UNIX): memory that is marked as not used.
# buffers: (Linux, BSD): cache for things like file system metadata.
# cached: (Linux, BSD): cache for various things.
# wired: (BSD, OSX): memory that is marked to always stay in RAM. It is never moved to disk.
# shared: (BSD): memory that may be simultaneously accessed by multiple processes.
self.reset()
for mem in ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached',
'wired', 'shared']:
if hasattr(vm_stats, mem):
self.stats[mem] = getattr(vm_stats, mem)
# Use the 'free'/htop calculation
# free=available+buffer+cached
self.stats['free'] = self.stats['available']
if hasattr(self.stats, 'buffers'):
self.stats['free'] += self.stats['buffers']
if hasattr(self.stats, 'cached'):
self.stats['free'] += self.stats['cached']
# used=total-free
self.stats['used'] = self.stats['total'] - self.stats['free']
elif self.input_method == 'snmp':
# Update stats using SNMP
if self.short_system_name in ('windows', 'esxi'):
# Mem stats for Windows|Vmware Esxi are stored in the FS table
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
self.reset()
else:
for fs in fs_stat:
# The Physical Memory (Windows) or Real Memory (VMware)
# gives statistics on RAM usage and availability.
if fs in ('Physical Memory', 'Real Memory'):
self.stats['total'] = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
self.stats['used'] = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
self.stats['percent'] = float(self.stats['used'] * 100 / self.stats['total'])
self.stats['free'] = self.stats['total'] - self.stats['used']
break
else:
# Default behavor for others OS
self.stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
if self.stats['total'] == '':
self.reset()
return self.stats
for key in list(self.stats.keys()):
if self.stats[key] != '':
self.stats[key] = float(self.stats[key]) * 1024
# Use the 'free'/htop calculation
self.stats['free'] = self.stats['free'] - self.stats['total'] + (self.stats['buffers'] + self.stats['cached'])
# used=total-free
self.stats['used'] = self.stats['total'] - self.stats['free']
# percent: the percentage usage calculated as (total - available) / total * 100.
self.stats['percent'] = float((self.stats['total'] - self.stats['free']) / self.stats['total'] * 100)
# Update the history list
self.update_stats_history()
# Update the view
self.update_views()
return self.stats
def update_views(self):
"""Update stats views."""
# Call the father's method
GlancesPlugin.update_views(self)
# Add specifics informations
# Alert and log
self.views['used']['decoration'] = self.get_alert_log(self.stats['used'], maximum=self.stats['total'])
# Optional
for key in ['active', 'inactive', 'buffers', 'cached']:
if key in self.stats:
self.views[key]['optional'] = True
def msg_curse(self, args=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist...
if not self.stats:
return ret
# Build the string message
# Header
msg = '{0:5} '.format('MEM')
ret.append(self.curse_add_line(msg, "TITLE"))
# Percent memory usage
msg = '{0:>7.1%}'.format(self.stats['percent'] / 100)
ret.append(self.curse_add_line(msg))
# Active memory usage
if 'active' in self.stats:
msg = ' {0:9}'.format('active:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional')))
msg = '{0:>7}'.format(self.auto_unit(self.stats['active']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional')))
# New line
ret.append(self.curse_new_line())
# Total memory usage
msg = '{0:6}'.format('total:')
ret.append(self.curse_add_line(msg))
msg = '{0:>7}'.format(self.auto_unit(self.stats['total']))
ret.append(self.curse_add_line(msg))
# Inactive memory usage
if 'inactive' in self.stats:
msg = ' {0:9}'.format('inactive:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional')))
msg = '{0:>7}'.format(self.auto_unit(self.stats['inactive']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional')))
# New line
ret.append(self.curse_new_line())
# Used memory usage
msg = '{0:6}'.format('used:')
ret.append(self.curse_add_line(msg))
msg = '{0:>7}'.format(self.auto_unit(self.stats['used']))
ret.append(self.curse_add_line(
msg, self.get_views(key='used', option='decoration')))
# Buffers memory usage
if 'buffers' in self.stats:
msg = ' {0:9}'.format('buffers:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional')))
msg = '{0:>7}'.format(self.auto_unit(self.stats['buffers']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional')))
# New line
ret.append(self.curse_new_line())
# Free memory usage
msg = '{0:6}'.format('free:')
ret.append(self.curse_add_line(msg))
msg = '{0:>7}'.format(self.auto_unit(self.stats['free']))
ret.append(self.curse_add_line(msg))
# Cached memory usage
if 'cached' in self.stats:
msg = ' {0:9}'.format('cached:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional')))
msg = '{0:>7}'.format(self.auto_unit(self.stats['cached']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional')))
return ret
| lgpl-3.0 |
ingadhoc/odoo | addons/decimal_precision/tests/test_qweb_float.py | 335 | 1986 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestFloatExport(common.TransactionCase):
def setUp(self):
super(TestFloatExport, self).setUp()
self.Model = self.registry('decimal.precision.test')
def get_converter(self, name):
converter = self.registry('ir.qweb.field.float')
field = self.Model._fields[name]
return lambda value, options=None: converter.value_to_html(
self.cr, self.uid, value, field, options=options, context=None)
def test_basic_float(self):
converter = self.get_converter('float')
self.assertEqual(
converter(42.0),
"42.0")
self.assertEqual(
converter(42.12345),
"42.12345")
converter = self.get_converter('float_2')
self.assertEqual(
converter(42.0),
"42.00")
self.assertEqual(
converter(42.12345),
"42.12")
converter = self.get_converter('float_4')
self.assertEqual(
converter(42.0),
'42.0000')
self.assertEqual(
converter(42.12345),
'42.1234')
def test_precision_domain(self):
DP = self.registry('decimal.precision')
DP.create(self.cr, self.uid, {
'name': 'A',
'digits': 2,
})
DP.create(self.cr, self.uid, {
'name': 'B',
'digits': 6,
})
converter = self.get_converter('float')
self.assertEqual(
converter(42.0, {'decimal_precision': 'A'}),
'42.00')
self.assertEqual(
converter(42.0, {'decimal_precision': 'B'}),
'42.000000')
converter = self.get_converter('float_4')
self.assertEqual(
converter(42.12345, {'decimal_precision': 'A'}),
'42.12')
self.assertEqual(
converter(42.12345, {'decimal_precision': 'B'}),
'42.123450')
| agpl-3.0 |
HousekeepLtd/django | tests/template_backends/test_django.py | 199 | 4793 | from template_tests.test_response import test_processor_name
from django.template import RequestContext
from django.template.backends.django import DjangoTemplates
from django.template.library import InvalidTemplateLibrary
from django.test import RequestFactory, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
engine_class = DjangoTemplates
backend_name = 'django'
def test_context_has_priority_over_template_context_processors(self):
# See ticket #23789.
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'context_processors': [test_processor_name],
},
})
template = engine.from_string('{{ processors }}')
request = RequestFactory().get('/')
# Check that context processors run
content = template.render({}, request)
self.assertEqual(content, 'yes')
# Check that context overrides context processors
content = template.render({'processors': 'no'}, request)
self.assertEqual(content, 'no')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_request_context_conflicts_with_request(self):
template = self.engine.from_string('hello')
request = RequestFactory().get('/')
request_context = RequestContext(request)
# This doesn't raise an exception.
template.render(request_context, request)
other_request = RequestFactory().get('/')
msg = ("render() was called with a RequestContext and a request "
"argument which refer to different requests. Make sure "
"that the context argument is a dict or at least that "
"the two arguments refer to the same request.")
with self.assertRaisesMessage(ValueError, msg):
template.render(request_context, other_request)
@override_settings(INSTALLED_APPS=['template_backends.apps.good'])
def test_templatetag_discovery(self):
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'libraries': {
'alternate': 'template_backends.apps.good.templatetags.good_tags',
'override': 'template_backends.apps.good.templatetags.good_tags',
},
},
})
# libraries are discovered from installed applications
self.assertEqual(
engine.engine.libraries['good_tags'],
'template_backends.apps.good.templatetags.good_tags',
)
self.assertEqual(
engine.engine.libraries['subpackage.tags'],
'template_backends.apps.good.templatetags.subpackage.tags',
)
# libraries are discovered from django.templatetags
self.assertEqual(
engine.engine.libraries['static'],
'django.templatetags.static',
)
# libraries passed in OPTIONS are registered
self.assertEqual(
engine.engine.libraries['alternate'],
'template_backends.apps.good.templatetags.good_tags',
)
# libraries passed in OPTIONS take precedence over discovered ones
self.assertEqual(
engine.engine.libraries['override'],
'template_backends.apps.good.templatetags.good_tags',
)
@override_settings(INSTALLED_APPS=['template_backends.apps.importerror'])
def test_templatetag_discovery_import_error(self):
"""
Import errors in tag modules should be reraised with a helpful message.
"""
with self.assertRaisesMessage(
InvalidTemplateLibrary,
"ImportError raised when trying to load "
"'template_backends.apps.importerror.templatetags.broken_tags'"
):
DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {},
})
def test_builtins_discovery(self):
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'builtins': ['template_backends.apps.good.templatetags.good_tags'],
},
})
self.assertEqual(
engine.engine.builtins, [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
'template_backends.apps.good.templatetags.good_tags',
]
)
| bsd-3-clause |
sylarcp/anita | venv/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/pymssql.py | 21 | 2968 | # mssql/pymssql.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSDialect_pymssql(MSDialect):
supports_sane_rowcount = False
driver = 'pymssql'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pymssql,
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
module = __import__('pymssql')
# pymmsql doesn't have a Binary method. we use string
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI.")
return module
def __init__(self, **params):
super(MSDialect_pymssql, self).__init__(**params)
self.use_scope_identity = True
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(
r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
port = opts.pop('port', None)
if port and 'host' in opts:
opts['host'] = "%s:%s" % (opts['host'], port)
return [[], opts]
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed"
):
if msg in str(e):
return True
else:
return False
dialect = MSDialect_pymssql
| mit |
saurabh6790/ON-RISAPP | selling/doctype/patient_registration/patient_registration.py | 11 | 3373 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, cint, flt, comma_or, nowdate, get_base_path
import barcode
import os
from webnotes.model.doc import Document, make_autoname
from selling.doctype.customer.customer import DocType
class DocType():
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def test_data(self):
webnotes.errprint("asd")
def on_update(self):
webnotes.errprint('onupdate')
flag = webnotes.conn.sql("select ifnull(name,'') from tabProfile where name='"+self.doc.email+"'",as_list=1,debug=1)
if not flag:
webnotes.errprint(flag)
self.create_profile()
self.generate_barcode()
#self.validate()
self.doc.master_type = "Patient Registration"
#self.create_account_head()
#self.create_customer()
self.create_patient_encounter_entry()
webnotes.errprint(self.doc.user_image_show)
self.doc.save()
def create_customer(self):
webnotes.errprint('customer creation starts')
from webnotes.model.doc import Document
d = Document('Customer')
d.customer_name = self.doc.name
d.gender = self.doc.gender
d.full_name = self.doc.customer_name
d.save()
webnotes.errprint(d.name)
def create_profile(self):
profile = webnotes.bean({
"doctype":"Profile",
"email": self.doc.email,
"first_name": self.doc.customer_name,
"user_image":self.doc.user_image,
"enabled": 1,
"user_type": "Customer"
})
profile.ignore_permissions = True
profile.insert()
def generate_barcode(self):
webnotes.errprint([self.doc.naming_series])
self.doc.patient_online_id=self.doc.name
from barcode.writer import ImageWriter
ean = barcode.get('code39',self.doc.patient_online_id,writer=ImageWriter())
path = os.path.join(get_base_path(), "public", "barcode_img")+"/"+self.doc.name
fullname = ean.save(path)
barcode_img = '<html>\
<table style="width: 100%; table-layout: fixed;">\
<tr>\
<td style="width:510px">\
<img src="'"/barcode_img/"+self.doc.name+".png"'" width="200px">\
</td>\
</tr>\
</table>\
</html>'
self.doc.barcode_image = barcode_img
def create_patient_encounter_entry(self):
from webnotes.model.bean import getlist
for encounter in getlist(self.doclist,'encounter_table'):
enct = Document('Patient Encounter Entry')
enct.encounter = encounter.encounter
enct.encounter_date = encounter.encounter_date
enct.radiologist_name = encounter.radiologist_name
enct.referrer_name = encounter.referrer_name
enct.problem_description = encounter.problem_description
enct.metal_in = encounter.metal_in
enct.pacemaker = encounter.pacemaker
enct.claustrophobia = encounter.claustrophobia
enct.pregnancy = encounter.pregnancy
enct.others = encounter.others
enct.procedure_alert = encounter.procedure_alert
enct.patient = encounter.parent
enct.entry_in_child = 'True'
enct.save()
webnotes.conn.sql("update tabEncounter set id = '%s' where name = '%s'"%(enct.name,encounter.name))
| agpl-3.0 |
sloanyang/aquantic | Tools/Scripts/webkitpy/common/checkout/checkout.py | 119 | 9383 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
from webkitpy.common.config import urls
from webkitpy.common.checkout.changelog import ChangeLog, parse_bug_id_from_changelog
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.checkout.scm import CommitMessage
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
# This class represents the WebKit-specific parts of the checkout (like ChangeLogs).
# FIXME: Move a bunch of ChangeLog-specific processing from SCM to this object.
# NOTE: All paths returned from this class should be absolute.
class Checkout(object):
    """Models the WebKit-specific parts of a checkout (e.g. ChangeLogs).

    Wraps an SCM object and layers ChangeLog parsing / commit-metadata
    extraction on top of it.  All paths returned from this class are absolute.
    """
    def __init__(self, scm, executive=None, filesystem=None):
        self._scm = scm
        # FIXME: We shouldn't be grabbing at private members on scm.
        self._executive = executive or self._scm._executive
        self._filesystem = filesystem or self._scm._filesystem
    def is_path_to_changelog(self, path):
        """Return True if path names a ChangeLog file (by exact basename)."""
        return self._filesystem.basename(path) == "ChangeLog"
    def _latest_entry_for_changelog_at_revision(self, changelog_path, revision):
        """Parse the newest entry of changelog_path as of the given revision."""
        changelog_contents = self._scm.contents_at_revision(changelog_path, revision)
        # contents_at_revision returns a byte array (str()), but we know
        # that ChangeLog files are utf-8. parse_latest_entry_from_file
        # expects a file-like object which vends unicode(), so we decode here.
        # Old revisions of Sources/WebKit/wx/ChangeLog have some invalid utf8 characters.
        changelog_file = StringIO.StringIO(changelog_contents.decode("utf-8", "ignore"))
        return ChangeLog.parse_latest_entry_from_file(changelog_file)
    def changelog_entries_for_revision(self, revision, changed_files=None):
        """Return the latest entry of every ChangeLog touched by revision."""
        if not changed_files:
            changed_files = self._scm.changed_files_for_revision(revision)
        # FIXME: This gets confused if ChangeLog files are moved, as
        # deletes are still "changed files" per changed_files_for_revision.
        # FIXME: For now we hack around this by caching any exceptions
        # which result from having deleted files included the changed_files list.
        changelog_entries = []
        for path in changed_files:
            if not self.is_path_to_changelog(path):
                continue
            try:
                changelog_entries.append(self._latest_entry_for_changelog_at_revision(path, revision))
            except ScriptError:
                # Deleted ChangeLogs can't be fetched at this revision; skip.
                pass
        return changelog_entries
    def _changelog_data_for_revision(self, revision):
        """Collect commit metadata (bug id, author, reviewer, ...) for revision.

        Returns None when the revision touched no parsable ChangeLog.
        """
        changed_files = self._scm.changed_files_for_revision(revision)
        changelog_entries = self.changelog_entries_for_revision(revision, changed_files=changed_files)
        # Assume for now that the first entry has everything we need:
        # FIXME: This will throw an exception if there were no ChangeLogs.
        if not len(changelog_entries):
            return None
        changelog_entry = changelog_entries[0]
        return {
            "bug_id": parse_bug_id_from_changelog(changelog_entry.contents()),
            "author_name": changelog_entry.author_name(),
            "author_email": changelog_entry.author_email(),
            "author": changelog_entry.author(),
            "reviewer_text": changelog_entry.reviewer_text(),
            "reviewer": changelog_entry.reviewer(),
            "contents": changelog_entry.contents(),
            "changed_files": changed_files,
        }
    @memoized
    def commit_info_for_revision(self, revision):
        """Return a CommitInfo for revision, or None if no ChangeLog data."""
        committer_email = self._scm.committer_email_for_revision(revision)
        changelog_data = self._changelog_data_for_revision(revision)
        if not changelog_data:
            return None
        return CommitInfo(revision, committer_email, changelog_data)
    def bug_id_for_revision(self, revision):
        # NOTE(review): raises AttributeError when commit_info_for_revision
        # returns None -- presumably callers only pass revisions that have
        # ChangeLogs; confirm.
        return self.commit_info_for_revision(revision).bug_id()
    def _modified_files_matching_predicate(self, git_commit, predicate, changed_files=None):
        # SCM returns paths relative to scm.checkout_root
        # Callers (especially those using the ChangeLog class) may
        # expect absolute paths, so this method returns absolute paths.
        if not changed_files:
            changed_files = self._scm.changed_files(git_commit)
        return filter(predicate, map(self._scm.absolute_path, changed_files))
    def modified_changelogs(self, git_commit, changed_files=None):
        """Absolute paths of the modified ChangeLog files."""
        return self._modified_files_matching_predicate(git_commit, self.is_path_to_changelog, changed_files=changed_files)
    def modified_non_changelogs(self, git_commit, changed_files=None):
        """Absolute paths of modified files that are not ChangeLogs."""
        return self._modified_files_matching_predicate(git_commit, lambda path: not self.is_path_to_changelog(path), changed_files=changed_files)
    def commit_message_for_this_commit(self, git_commit, changed_files=None, return_stderr=False):
        """Build a CommitMessage from the modified ChangeLogs.

        Raises ScriptError when no ChangeLog was modified.
        """
        changelog_paths = self.modified_changelogs(git_commit, changed_files)
        if not len(changelog_paths):
            raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
                              "All changes require a ChangeLog. See:\n %s" % urls.contribution_guidelines)
        message_text = self._scm.run([self._scm.script_path('commit-log-editor'), '--print-log'] + changelog_paths, return_stderr=return_stderr)
        return CommitMessage(message_text.splitlines())
    def recent_commit_infos_for_files(self, paths):
        """Return the set of CommitInfos of all revisions touching paths."""
        revisions = set(sum(map(self._scm.revisions_changing_file, paths), []))
        return set(map(self.commit_info_for_revision, revisions))
    def suggested_reviewers(self, git_commit, changed_files=None):
        """Suggest reviewers from recent commits touching the changed files.

        Most recent reviewers/authors come first; duplicates and
        non-reviewers are filtered out.
        """
        changed_files = self.modified_non_changelogs(git_commit, changed_files)
        commit_infos = sorted(self.recent_commit_infos_for_files(changed_files), key=lambda info: info.revision(), reverse=True)
        reviewers = filter(lambda person: person and person.can_review, sum(map(lambda info: [info.reviewer(), info.author()], commit_infos), []))
        unique_reviewers = reduce(lambda suggestions, reviewer: suggestions + [reviewer if reviewer not in suggestions else None], reviewers, [])
        return filter(lambda reviewer: reviewer, unique_reviewers)
    def bug_id_for_this_commit(self, git_commit, changed_files=None):
        """Return the bug id parsed from this commit's message, or None."""
        try:
            return parse_bug_id_from_changelog(self.commit_message_for_this_commit(git_commit, changed_files).message())
        except ScriptError, e:
            pass # We might not have ChangeLogs.
    def apply_patch(self, patch):
        """Apply a patch via svn-apply, continuing past errors (--force)."""
        # It's possible that the patch was not made from the root directory.
        # We should detect and handle that case.
        # FIXME: Move _scm.script_path here once we get rid of all the dependencies.
        # --force (continue after errors) is the common case, so we always use it.
        args = [self._scm.script_path('svn-apply'), "--force"]
        if patch.reviewer():
            args += ['--reviewer', patch.reviewer().full_name]
        self._executive.run_command(args, input=patch.contents(), cwd=self._scm.checkout_root)
    def apply_reverse_diff(self, revision):
        """Reverse-apply one revision, restoring any reverted ChangeLogs."""
        self._scm.apply_reverse_diff(revision)
        # We revert the ChangeLogs because removing lines from a ChangeLog
        # doesn't make sense. ChangeLogs are append only.
        changelog_paths = self.modified_changelogs(git_commit=None)
        if len(changelog_paths):
            self._scm.revert_files(changelog_paths)
        conflicts = self._scm.conflicted_files()
        if len(conflicts):
            raise ScriptError(message="Failed to apply reverse diff for revision %s because of the following conflicts:\n%s" % (revision, "\n".join(conflicts)))
    def apply_reverse_diffs(self, revision_list):
        """Reverse-apply several revisions, newest first."""
        for revision in sorted(revision_list, reverse=True):
            self.apply_reverse_diff(revision)
| gpl-2.0 |
pombredanne/parakeet | parakeet/syntax/wrappers.py | 2 | 1799 | from .. import names, prims
from ..ndtypes import ScalarT, Type, type_conv
from .. syntax import FormalArgs, Var, UntypedFn, Return, PrimCall, Expr, Cast
# Memo table: (name, expr, n_inputs, fixed_args, keyword_args, unpack) -> UntypedFn
_untyped_fn_cache = {}

def simple_untyped_fn(name,
                      expr,
                      n_inputs = 1,
                      fixed_args = (),
                      keyword_args = None,
                      unpack = False):
  """Build (and cache) an UntypedFn with `n_inputs` fresh positional args.

  The function body returns `expr(*combined, **keyword_args)` where
  `combined` is `fixed_args` followed by the fresh argument variables --
  passed individually when `unpack` is True, otherwise bundled as one tuple.

  Note: the defaults were changed from the mutable `[]` / `{}` to an empty
  tuple / None sentinel (a classic Python pitfall); both are only read, so
  behavior is unchanged.
  """
  if keyword_args is None:
    keyword_args = {}
  key = name, expr, n_inputs, tuple(fixed_args), tuple(keyword_args.items()), unpack
  if key in _untyped_fn_cache:
    return _untyped_fn_cache[key]
  fn_name = names.fresh(name)
  args_obj = FormalArgs()
  arg_vars = []
  # Distinct loop variable: the original shadowed the `name` parameter here.
  for input_name in names.fresh_list(n_inputs):
    args_obj.add_positional(input_name)
    arg_vars.append(Var(input_name))
  if unpack:
    combined_args = tuple(fixed_args) + tuple(arg_vars)
  else:
    combined_args = tuple(fixed_args) + (tuple(arg_vars),)
  result = expr(*combined_args, **keyword_args)
  body = [Return(result)]
  fundef = UntypedFn(fn_name, args_obj, body, [])
  _untyped_fn_cache[key] = fundef
  return fundef
def build_untyped_prim_fn(p):
  """Wrap the primitive `p` in an untyped function applying PrimCall to it."""
  assert isinstance(p, prims.Prim), "Expected Prim but got %s" % p
  prim_fixed_args = [p]
  return simple_untyped_fn(p.name, PrimCall, p.nin, prim_fixed_args)
def build_untyped_expr_fn(expr, n_args = 1):
  """Return an untyped function that applies `expr` to `n_args` arguments."""
  wrapper_name = expr.__name__ + "_fn"
  return simple_untyped_fn(wrapper_name, expr, n_args)
# NOTE(review): this cache is never read or written in this module --
# possibly vestigial, or consumed by importers; confirm before removing.
_untyped_cast_wrappers = {}
def build_untyped_cast_fn(t):
  """Return an untyped one-argument function that casts to scalar type `t`.

  `t` may be a parakeet Type or any value type_conv.equiv_type can convert.
  """
  if not isinstance(t, Type):
    t = type_conv.equiv_type(t)
  assert isinstance(t, ScalarT), "Expected scalar type but got %s" % t
  return simple_untyped_fn("cast_" + str(t), Cast, 1, keyword_args = {'type': t})
| bsd-3-clause |
kuiwei/kuiwei | common/djangoapps/external_auth/tests/test_openid_provider.py | 46 | 16144 | #-*- encoding=utf-8 -*-
'''
Created on Jan 18, 2013
@author: brian
'''
import openid
import json
from openid.fetchers import HTTPFetcher, HTTPResponse
from urlparse import parse_qs, urlparse
from django.conf import settings
from django.test import TestCase, LiveServerTestCase
from django.core.cache import cache
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from unittest import skipUnless
from student.tests.factories import UserFactory
from external_auth.views import provider_login
class MyFetcher(HTTPFetcher):
    """A fetcher that uses server-internal calls for performing HTTP
    requests.
    """

    def __init__(self, client):
        """@param client: A test client object"""
        super(MyFetcher, self).__init__()
        self.client = client

    def fetch(self, url, body=None, headers=None):
        """Perform an HTTP request via the Django test client.

        @raises Exception: Any exception that can be raised by Django
        @see: C{L{HTTPFetcher.fetch}}
        """
        if body:
            # POST: the body arrives URL-encoded, so decode it back into
            # a parameter mapping for the test client.
            response = self.client.post(url, parse_qs(body))
        else:
            # GET: forward the Accept header, if given, as the content type.
            params = {}
            if headers and 'Accept' in headers:
                params['CONTENT_TYPE'] = headers['Accept']
            response = self.client.get(url, params)
        # Map the Django test response onto the fetcher's HTTP abstraction.
        response_headers = {}
        if 'Content-Type' in response:
            response_headers['content-type'] = response['Content-Type']
        if 'X-XRDS-Location' in response:
            response_headers['x-xrds-location'] = response['X-XRDS-Location']
        return HTTPResponse(
            body=response.content,
            final_url=url,
            headers=response_headers,
            status=response.status_code,
        )
class OpenIdProviderTest(TestCase):
    """
    Tests of the OpenId login
    """
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_begin_login_with_xrds_url(self):
        # the provider URL must be converted to an absolute URL in order to be
        # used as an openid provider.
        provider_url = reverse('openid-provider-xrds')
        factory = RequestFactory()
        request = factory.request()
        abs_provider_url = request.build_absolute_uri(location=provider_url)
        # In order for this absolute URL to work (i.e. to get xrds, then authentication)
        # in the test environment, we either need a live server that works with the default
        # fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
        # Here we do the latter:
        fetcher = MyFetcher(self.client)
        openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
        # now we can begin the login process by invoking a local openid client,
        # with a pointer to the (also-local) openid provider:
        with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
            url = reverse('openid-login')
            resp = self.client.post(url)
            code = 200
            self.assertEqual(resp.status_code, code,
                             "got code {0} for url '{1}'. Expected code {2}"
                             .format(resp.status_code, url, code))
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_begin_login_with_login_url(self):
        # the provider URL must be converted to an absolute URL in order to be
        # used as an openid provider.
        provider_url = reverse('openid-provider-login')
        factory = RequestFactory()
        request = factory.request()
        abs_provider_url = request.build_absolute_uri(location=provider_url)
        # In order for this absolute URL to work (i.e. to get xrds, then authentication)
        # in the test environment, we either need a live server that works with the default
        # fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
        # Here we do the latter:
        fetcher = MyFetcher(self.client)
        openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
        # now we can begin the login process by invoking a local openid client,
        # with a pointer to the (also-local) openid provider:
        with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
            url = reverse('openid-login')
            resp = self.client.post(url)
            code = 200
            self.assertEqual(resp.status_code, code,
                             "got code {0} for url '{1}'. Expected code {2}"
                             .format(resp.status_code, url, code))
            # The response is the auto-submitting form that forwards the
            # checkid_setup request to the provider; verify each hidden field.
            self.assertContains(resp, '<input name="openid.mode" type="hidden" value="checkid_setup" />', html=True)
            self.assertContains(resp, '<input name="openid.ns" type="hidden" value="http://specs.openid.net/auth/2.0" />', html=True)
            self.assertContains(resp, '<input name="openid.identity" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True)
            self.assertContains(resp, '<input name="openid.claimed_id" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True)
            self.assertContains(resp, '<input name="openid.ns.ax" type="hidden" value="http://openid.net/srv/ax/1.0" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.mode" type="hidden" value="fetch_request" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.required" type="hidden" value="email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.fullname" type="hidden" value="http://axschema.org/namePerson" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.lastname" type="hidden" value="http://axschema.org/namePerson/last" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.firstname" type="hidden" value="http://axschema.org/namePerson/first" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.nickname" type="hidden" value="http://axschema.org/namePerson/friendly" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.email" type="hidden" value="http://axschema.org/contact/email" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.old_email" type="hidden" value="http://schema.openid.net/contact/email" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.old_nickname" type="hidden" value="http://schema.openid.net/namePerson/friendly" />', html=True)
            self.assertContains(resp, '<input name="openid.ax.type.old_fullname" type="hidden" value="http://schema.openid.net/namePerson" />', html=True)
            self.assertContains(resp, '<input type="submit" value="Continue" />', html=True)
            # this should work on the server:
            self.assertContains(resp, '<input name="openid.realm" type="hidden" value="http://testserver/" />', html=True)
            # not included here are elements that will vary from run to run:
            # <input name="openid.return_to" type="hidden" value="http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H" />
            # <input name="openid.assoc_handle" type="hidden" value="{HMAC-SHA1}{50ff8120}{rh87+Q==}" />
    def attempt_login(self, expected_code, **kwargs):
        """ Attempt to log in through the open id provider login """
        url = reverse('openid-provider-login')
        # Canonical checkid_setup parameters; kwargs below override any of
        # these (keys are given without the "openid." prefix).
        post_args = {
            "openid.mode": "checkid_setup",
            "openid.return_to": "http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H",
            "openid.assoc_handle": "{HMAC-SHA1}{50ff8120}{rh87+Q==}",
            "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select",
            "openid.ns": "http://specs.openid.net/auth/2.0",
            "openid.realm": "http://testserver/",
            "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select",
            "openid.ns.ax": "http://openid.net/srv/ax/1.0",
            "openid.ax.mode": "fetch_request",
            "openid.ax.required": "email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname",
            "openid.ax.type.fullname": "http://axschema.org/namePerson",
            "openid.ax.type.lastname": "http://axschema.org/namePerson/last",
            "openid.ax.type.firstname": "http://axschema.org/namePerson/first",
            "openid.ax.type.nickname": "http://axschema.org/namePerson/friendly",
            "openid.ax.type.email": "http://axschema.org/contact/email",
            "openid.ax.type.old_email": "http://schema.openid.net/contact/email",
            "openid.ax.type.old_nickname": "http://schema.openid.net/namePerson/friendly",
            "openid.ax.type.old_fullname": "http://schema.openid.net/namePerson",
        }
        # override the default args with any given arguments
        for key in kwargs:
            post_args["openid." + key] = kwargs[key]
        resp = self.client.post(url, post_args)
        code = expected_code
        self.assertEqual(resp.status_code, code,
                         "got code {0} for url '{1}'. Expected code {2}"
                         .format(resp.status_code, url, code))
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_open_id_setup(self):
        """ Attempt a standard successful login """
        self.attempt_login(200)
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_invalid_namespace(self):
        """ Test for 403 error code when the namespace of the request is invalid"""
        self.attempt_login(403, ns="http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0")
    @override_settings(OPENID_PROVIDER_TRUSTED_ROOTS=['http://apps.cs50.edx.org'])
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_invalid_return_url(self):
        """ Test for 403 error code when the url"""
        # return_to is deliberately NOT under the trusted root above.
        self.attempt_login(403, return_to="http://apps.cs50.edx.or")
    def _send_bad_redirection_login(self):
        """
        Attempt to log in to the provider with setup parameters

        Intentionally fail the login to force a redirect
        """
        user = UserFactory()
        factory = RequestFactory()
        # Wrong password on purpose ('password' vs the factory's 'test').
        post_params = {'email': user.email, 'password': 'password'}
        fake_url = 'fake url'
        request = factory.post(reverse('openid-provider-login'), post_params)
        openid_setup = {
            'request': factory.request(),
            'url': fake_url
        }
        request.session = {
            'openid_setup': openid_setup
        }
        response = provider_login(request)
        return response
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_login_openid_handle_redirection(self):
        """ Test to see that we can handle login redirection properly"""
        response = self._send_bad_redirection_login()
        self.assertEquals(response.status_code, 302)
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_login_openid_handle_redirection_ratelimited(self):
        # try logging in 30 times, the default limit in the number of failed
        # log in attempts before the rate gets limited
        for _ in xrange(30):
            self._send_bad_redirection_login()
        response = self._send_bad_redirection_login()
        # verify that we are not returning the default 403
        self.assertEquals(response.status_code, 302)
        # clear the ratelimit cache so that we don't fail other logins
        cache.clear()
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_openid_final_response(self):
        url = reverse('openid-provider-login')
        user = UserFactory()
        # login to the client so that we can persist session information
        # (second name is non-ASCII to exercise unicode handling)
        for name in ['Robot 33', '☃']:
            user.profile.name = name
            user.profile.save()
            self.client.login(username=user.username, password='test')
            # login once to get the right session information
            self.attempt_login(200)
            post_args = {
                'email': user.email,
                'password': 'test',
            }
            # call url again, this time with username and password
            resp = self.client.post(url, post_args)
            # all information is embedded in the redirect url
            location = resp['Location']
            # parse the url
            parsed_url = urlparse(location)
            parsed_qs = parse_qs(parsed_url.query)
            self.assertEquals(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email')
            self.assertEquals(parsed_qs['openid.ax.type.ext0'][0], 'http://axschema.org/namePerson')
            self.assertEquals(parsed_qs['openid.ax.value.ext1.1'][0], user.email)
            self.assertEquals(parsed_qs['openid.ax.value.ext0.1'][0], user.profile.name)
class OpenIdProviderLiveServerTest(LiveServerTestCase):
    """
    In order for this absolute URL to work (i.e. to get xrds, then authentication)
    in the test environment, we either need a live server that works with the default
    fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
    Here we do the former.
    """
    @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
                settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
                'OpenID not enabled')
    def test_begin_login(self):
        # the provider URL must be converted to an absolute URL in order to be
        # used as an openid provider.
        provider_url = reverse('openid-provider-xrds')
        factory = RequestFactory()
        request = factory.request()
        abs_provider_url = request.build_absolute_uri(location=provider_url)
        # now we can begin the login process by invoking a local openid client,
        # with a pointer to the (also-local) openid provider:
        with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
            url = reverse('openid-login')
            resp = self.client.post(url)
            code = 200
            self.assertEqual(resp.status_code, code,
                             "got code {0} for url '{1}'. Expected code {2}"
                             .format(resp.status_code, url, code))
    @classmethod
    def tearDownClass(cls):
        """
        Workaround for a runtime error that occurs
        intermittently when the server thread doesn't shut down
        within 2 seconds.

        Since the server is running in a Django thread and will
        be terminated when the test suite terminates,
        this shouldn't cause a resource allocation issue.
        """
        try:
            super(OpenIdProviderLiveServerTest, cls).tearDownClass()
        except RuntimeError:
            # Swallow the shutdown-timeout error; just warn (Python 2 print).
            print "Warning: Could not shut down test server."
| agpl-3.0 |
jsgf/xen | tools/python/logging/logging-0.4.9.2/test/log_test2.py | 42 | 4047 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests logger levels and basic Formatter, and logging to
sockets.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.handlers, socket
# Counter giving each generated test message a unique, increasing number.
msgcount = 0

def nextmessage():
    """Return the next numbered test message ("Message 0", "Message 1", ...)."""
    global msgcount
    msg = "Message %d" % msgcount
    msgcount += 1
    return msg
def main():
    """Exercise logger-level filtering across a SocketHandler.

    Emits messages numbered 0-24 (all of which should be logged by the
    configured levels) followed by a batch that the levels must suppress.
    """
    logging.basicConfig()
    logging.getLogger("").setLevel(logging.DEBUG)
    hdlr = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    if __name__ == "__main__":
        # NOTE(review): the formatter is attached only when run as a script;
        # presumably importers of this module install their own -- confirm.
        hdlr.setFormatter(logging.Formatter("%(asctime)s %(name)-19s %(levelname)-5s - %(message)s"))
    logging.getLogger("").addHandler(hdlr)
    # Loggers at assorted levels, including child loggers that inherit an
    # effective level from their nearest configured ancestor.
    ERR = logging.getLogger("ERR")
    ERR.setLevel(logging.ERROR)
    INF = logging.getLogger("INF")
    INF.setLevel(logging.INFO)
    INF_ERR = logging.getLogger("INF.ERR")
    INF_ERR.setLevel(logging.ERROR)
    DEB = logging.getLogger("DEB")
    DEB.setLevel(logging.DEBUG)
    INF_UNDEF = logging.getLogger("INF.UNDEF")
    INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
    UNDEF = logging.getLogger("UNDEF")
    GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
    CHILD = logging.getLogger("INF.BADPARENT")
    #These should log
    ERR.log(logging.CRITICAL, nextmessage())
    ERR.error(nextmessage())
    INF.log(logging.CRITICAL, nextmessage())
    INF.error(nextmessage())
    INF.warning(nextmessage())
    INF.info(nextmessage())
    INF_UNDEF.log(logging.CRITICAL, nextmessage())
    INF_UNDEF.error(nextmessage())
    INF_UNDEF.warning(nextmessage())
    INF_UNDEF.info(nextmessage())
    INF_ERR.log(logging.CRITICAL, nextmessage())
    INF_ERR.error(nextmessage())
    INF_ERR_UNDEF.log(logging.CRITICAL, nextmessage())
    INF_ERR_UNDEF.error(nextmessage())
    DEB.log(logging.CRITICAL, nextmessage())
    DEB.error(nextmessage())
    DEB.warning(nextmessage())
    DEB.info(nextmessage())
    DEB.debug(nextmessage())
    UNDEF.log(logging.CRITICAL, nextmessage())
    UNDEF.error(nextmessage())
    UNDEF.warning(nextmessage())
    UNDEF.info(nextmessage())
    GRANDCHILD.log(logging.CRITICAL, nextmessage())
    CHILD.log(logging.CRITICAL, nextmessage())
    #These should not log
    ERR.warning(nextmessage())
    ERR.info(nextmessage())
    ERR.debug(nextmessage())
    INF.debug(nextmessage())
    INF_UNDEF.debug(nextmessage())
    INF_ERR.warning(nextmessage())
    INF_ERR.info(nextmessage())
    INF_ERR.debug(nextmessage())
    INF_ERR_UNDEF.warning(nextmessage())
    INF_ERR_UNDEF.info(nextmessage())
    INF_ERR_UNDEF.debug(nextmessage())
    INF.info("Messages should bear numbers 0 through 24.")
    # Detach cleanly so repeated invocations don't stack handlers.
    hdlr.close()
    logging.getLogger("").removeHandler(hdlr)
if __name__ == "__main__":
    try:
        main()
    except socket.error:
        # The receiving server isn't reachable; report it instead of a traceback.
        print "\nA socket error occurred. Ensure that logrecv.py is running to receive logging requests from this script."
| gpl-2.0 |
meteorcloudy/tensorflow | tensorflow/contrib/layers/python/layers/feature_column_ops.py | 31 | 37478 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _maybe_reshape_input_tensor(tensor, column_name, output_rank):
  """Coerce `tensor` to have `output_rank` dimensions.

  Behavior, given the tensor's input rank:
    * `output_rank > input_rank + 1`: raise a `ValueError`.
    * `output_rank == input_rank + 1`: append a size-1 trailing dimension.
    * `output_rank == input_rank`: return the tensor unchanged.
    * `output_rank < input_rank`: flatten the inner dimensions.

  Args:
    tensor: A Tensor or SparseTensor to be reshaped.
    column_name: A string name of the feature column for the tensor.
    output_rank: the desired rank of the tensor.

  Returns:
    A reshaped Tensor or SparseTensor.

  Raises:
    ValueError: if `output_rank > input_rank + 1` for the input tensor.
  """
  input_rank = tensor.get_shape().ndims
  if input_rank is None and isinstance(tensor, sparse_tensor_py.SparseTensor):
    # A SparseTensor's rank can often be recovered from the static shape of
    # its dense_shape vector even when the values' rank is unknown.
    input_rank = tensor.dense_shape.get_shape().as_list()[0]
  if input_rank is None:
    raise ValueError('Error while processing column {}. Rank of input Tensor '
                     'can not be None.'.format(column_name))
  if output_rank > input_rank + 1:
    raise ValueError('Error while processing column {}. Rank of input Tensor '
                     '({}) should be the same as output_rank ({}). For '
                     'example, sequence data should typically be 3 '
                     'dimensional (rank 3) while non-sequence data is '
                     'typically 2 dimensional (rank 2).'.format(
                         column_name, input_rank, output_rank))
  if output_rank == input_rank + 1:
    # Append a trailing dimension of size 1.
    if isinstance(tensor, sparse_tensor_py.SparseTensor):
      expanded_shape = array_ops.concat([tensor.dense_shape, [1]], 0)
      return sparse_ops.sparse_reshape(tensor, expanded_shape)
    expanded = array_ops.expand_dims(tensor, -1)
    # Propagate the statically-known shape when one is available.
    known_shape = tensor.get_shape()
    if known_shape is not None and known_shape.dims is not None:
      expanded.set_shape(known_shape.as_list() + [1])
    return expanded
  if output_rank < input_rank:
    return layers._inner_flatten(tensor, output_rank)  # pylint: disable=protected-access
  return tensor
def _input_from_feature_columns(columns_to_tensors,
                                feature_columns,
                                weight_collections,
                                trainable,
                                scope,
                                output_rank,
                                default_name,
                                cols_to_outs=None):
  """Implementation of `input_from(_sequence)_feature_columns`."""
  # Copy so cached transformations don't mutate the caller's mapping.
  columns_to_tensors = columns_to_tensors.copy()
  check_feature_columns(feature_columns)
  if cols_to_outs is not None and not isinstance(cols_to_outs, dict):
    raise ValueError('cols_to_outs must be a dict unless None')
  with variable_scope.variable_scope(scope,
                                     default_name=default_name,
                                     values=columns_to_tensors.values()):
    output_tensors = []
    transformer = _Transformer(columns_to_tensors)
    if weight_collections:
      # Deduplicate and make sure created variables also land in
      # GLOBAL_VARIABLES.
      weight_collections = list(set(list(weight_collections) +
                                    [ops.GraphKeys.GLOBAL_VARIABLES]))
    # Sorted iteration keeps variable creation order deterministic.
    for column in sorted(set(feature_columns), key=lambda x: x.key):
      with variable_scope.variable_scope(None,
                                         default_name=column.name,
                                         values=columns_to_tensors.values()):
        transformed_tensor = transformer.transform(column)
        if output_rank == 3:
          transformed_tensor = nest.map_structure(
              functools.partial(
                  _maybe_reshape_input_tensor,
                  column_name=column.name,
                  output_rank=output_rank), transformed_tensor)
        try:
          # Prefer the embedding-lookup path when the column supports it.
          # pylint: disable=protected-access
          arguments = column._deep_embedding_lookup_arguments(
              transformed_tensor)
          output_tensors.append(
              fc._embeddings_from_arguments(  # pylint: disable=protected-access
                  column,
                  arguments,
                  weight_collections,
                  trainable,
                  output_rank=output_rank))
        except NotImplementedError as ee:
          # Column has no embedding lookup; fall back to its dense
          # DNN-input representation.
          try:
            # pylint: disable=protected-access
            output_tensors.append(column._to_dnn_input_layer(
                transformed_tensor,
                weight_collections,
                trainable,
                output_rank=output_rank))
          except ValueError as e:
            raise ValueError('Error creating input layer for column: {}.\n'
                             '{}, {}'.format(column.name, e, ee))
      if cols_to_outs is not None:
        cols_to_outs[column] = output_tensors[-1]
    return array_ops.concat(output_tensors, output_rank - 1)
def input_from_feature_columns(columns_to_tensors,
                               feature_columns,
                               weight_collections=None,
                               trainable=True,
                               scope=None,
                               cols_to_outs=None):
  """A tf.contrib.layers style input layer builder based on FeatureColumns.

  Converts column-oriented input data into the single dense tensor expected
  by the first layer of a model. Each kind of feature column (sparse,
  embedded, bucketized, real-valued, ...) is converted with the operation
  appropriate for it.

  Example:

  ```python
  # Building model for training
  columns_to_tensor = tf.parse_example(...)
  first_layer = input_from_feature_columns(
      columns_to_tensors=columns_to_tensor,
      feature_columns=feature_columns)
  second_layer = fully_connected(inputs=first_layer, ...)
  ```

  where feature_columns can be defined as follows:

  ```python
  sparse_feature = sparse_column_with_hash_bucket(
      column_name="sparse_col", ...)
  sparse_feature_emb = embedding_column(sparse_id_column=sparse_feature, ...)
  real_valued_feature = real_valued_column(...)
  real_valued_buckets = bucketized_column(
      source_column=real_valued_feature, ...)
  feature_columns=[sparse_feature_emb, real_valued_buckets]
  ```

  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string'
      keys denote base (untransformed) features; FeatureColumn keys denote
      features already transformed by the input pipeline.
    feature_columns: A set of FeatureColumn instances describing all inputs.
    weight_collections: List of graph collections to which weights are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.
    cols_to_outs: Optional dict from feature column to output tensor,
      which is concatenated into the returned tensor.

  Returns:
    A Tensor which can be consumed by hidden layers in the neural network.

  Raises:
    ValueError: if a FeatureColumn cannot be consumed by a neural network.
  """
  # Rank 2 output: one row per example, features concatenated column-wise.
  return _input_from_feature_columns(
      columns_to_tensors,
      feature_columns,
      weight_collections,
      trainable,
      scope,
      output_rank=2,
      default_name='input_from_feature_columns',
      cols_to_outs=cols_to_outs)
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
                                        feature_columns,
                                        weight_collections=None,
                                        trainable=True,
                                        scope=None):
  """Builds inputs for sequence models from `FeatureColumn`s.

  See documentation for `input_from_feature_columns`. Only the following
  `FeatureColumn` types may appear in `feature_columns`: `_OneHotColumn`,
  `_EmbeddingColumn`, `_ScatteredEmbeddingColumn`, `_RealValuedColumn`,
  `_DataFrameColumn`. Columns constructed from `ScatteredEmbeddingColumn`,
  `BucketizedColumn`, or `CrossedColumn` (at any depth) are rejected.

  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string'
      keys denote base (untransformed) features; FeatureColumn keys denote
      features already transformed by the input pipeline.
    feature_columns: A set of FeatureColumn instances describing all inputs.
    weight_collections: List of graph collections to which weights are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    A Tensor which can be consumed by hidden layers in the neural network.

  Raises:
    ValueError: if a FeatureColumn cannot be consumed by a neural network.
  """
  # Validate column types up front; both helpers raise ValueError on misuse.
  _check_supported_sequence_columns(feature_columns)
  _check_forbidden_sequence_columns(feature_columns)

  # Rank 3 output: [batch, time, features].
  return _input_from_feature_columns(
      columns_to_tensors,
      feature_columns,
      weight_collections,
      trainable,
      scope,
      output_rank=3,
      default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
                             columns_to_tensors,
                             embedding_lookup_arguments,
                             num_outputs,
                             trainable,
                             weight_collections):
  """Creates variables and returns predictions for linear weights in a model.

  Args:
    column: the column we're working on.
    columns_to_tensors: a map from column name to tensors.
    embedding_lookup_arguments: arguments for embedding lookup.
    num_outputs: how many outputs.
    trainable: whether the variable we create is trainable.
    weight_collections: weights will be placed here.

  Returns:
    variables: the created embeddings (always a list of variables).
    predictions: the computed predictions.
  """
  with variable_scope.variable_scope(
      None, default_name=column.name, values=columns_to_tensors.values()):
    # One weight row per vocabulary entry, one column per output unit.
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[embedding_lookup_arguments.vocab_size, num_outputs],
        dtype=dtypes.float32,
        initializer=embedding_lookup_arguments.initializer,
        trainable=trainable,
        collections=weight_collections)
    if fc._is_variable(variable):  # pylint: disable=protected-access
      # Plain variable: wrap in a list so downstream handling is uniform.
      variable = [variable]
    else:
      # Presumably a partitioned variable — expand to its shard list.
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    # NOTE(review): the 'safe' lookup presumably tolerates empty rows /
    # invalid ids — confirm against embedding_ops documentation.
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        embedding_lookup_arguments.input_tensor,
        sparse_weights=embedding_lookup_arguments.weight_tensor,
        combiner=embedding_lookup_arguments.combiner,
        name=column.name + '_weights')
    return variable, predictions
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight.

  All columns' sparse ids are remapped into disjoint row ranges of one
  shared weight matrix, then looked up with a single sparse lookup.

  Args:
    columns_to_tensors: a map from column name to tensors.
    embedding_lookup_arguments: non-empty list of lookup arguments, one per
      column; each must use combiner 'sum' and carry no weight tensor.
    num_outputs: number of output units of the shared weight matrix.
    trainable: whether the created variable is trainable.
    weight_collections: graph collections the weight is added to.

  Returns:
    variables: the created shared weight (as a list of variables).
    predictions: the computed predictions.
  """
  # NOTE(review): plain asserts are stripped under `python -O`; kept as-is
  # since callers may rely on AssertionError.
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  # Offset each column's ids by the running vocabulary size so columns index
  # disjoint row ranges of the shared weight matrix.
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices,
                                      values,
                                      t.dense_shape))
  # Concatenate along the feature axis into one sparse tensor.
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights', values=columns_to_tensors.values()):
    # prev_size is now the total vocabulary size across all columns.
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    if fc._is_variable(variable):  # pylint: disable=protected-access
      variable = [variable]
    else:
      # Presumably a partitioned variable — expand to its shard list.
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
                                            feature_columns,
                                            num_outputs,
                                            weight_collections=None,
                                            trainable=True,
                                            scope=None):
  """A restricted linear prediction builder based on FeatureColumns.

  As long as all feature columns are unweighted sparse columns this computes
  the prediction of a linear model which stores all weights in a single
  variable.

  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string' key
      means a base feature (not-transformed). It can have FeatureColumn as a
      key too. That means that FeatureColumn is already transformed by input
      pipeline. For example, `inflow` may have handled transformations.
    feature_columns: A set containing all the feature columns. All items in
      the set should be instances of classes derived from FeatureColumn.
    num_outputs: An integer specifying number of outputs. Default value is 1.
    weight_collections: List of graph collections to which weights are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    A tuple containing:

    * A Tensor which represents predictions of a linear model.
    * A list of Variables storing the weights.
    * A Variable which is used for bias.

  Raises:
    ValueError: if FeatureColumn cannot be used for linear predictions.
  """
  # Copy so that the transformer's inserted entries don't leak to the caller.
  columns_to_tensors = columns_to_tensors.copy()
  check_feature_columns(feature_columns)
  with variable_scope.variable_scope(
      scope,
      default_name='joint_weighted_sum_from_feature_columns',
      values=columns_to_tensors.values()):
    transformer = _Transformer(columns_to_tensors)
    embedding_lookup_arguments = []
    # Deterministic order: sort unique columns by key.
    for column in sorted(set(feature_columns), key=lambda x: x.key):
      transformed_tensor = transformer.transform(column)
      try:
        embedding_lookup_arguments.append(
            column._wide_embedding_lookup_arguments(transformed_tensor))   # pylint: disable=protected-access
      except NotImplementedError:
        # Only sparse columns expose wide-embedding arguments; anything
        # else cannot share the single joint weight variable.
        raise NotImplementedError('Real-valued columns are not supported. '
                                  'Use weighted_sum_from_feature_columns '
                                  'instead, or bucketize these columns.')

    # One shared weight matrix for every column.
    variable, predictions_no_bias = _create_joint_embedding_lookup(
        columns_to_tensors,
        embedding_lookup_arguments,
        num_outputs,
        trainable,
        weight_collections)
    bias = contrib_variables.model_variable(
        'bias_weight',
        shape=[num_outputs],
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=_add_variable_collection(weight_collections))
    _log_variable(bias)
    predictions = nn_ops.bias_add(predictions_no_bias, bias)

    return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
                                      feature_columns,
                                      num_outputs,
                                      weight_collections=None,
                                      trainable=True,
                                      scope=None):
  """A tf.contrib.layers style linear prediction builder based on FeatureColumn.

  Generally a single example in training data is described with feature
  columns. This function generates weighted sum for each num_outputs.
  Weighted sum refers to logits in classification problems. It refers to
  prediction itself for linear regression problems.

  Example:

    ```
    # Building model for training
    feature_columns = (
        real_valued_column("my_feature1"),
        ...
    )
    columns_to_tensor = tf.parse_example(...)
    logits = weighted_sum_from_feature_columns(
        columns_to_tensors=columns_to_tensor,
        feature_columns=feature_columns,
        num_outputs=1)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                   logits=logits)
    ```

  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string' key
      means a base feature (not-transformed). It can have FeatureColumn as a
      key too. That means that FeatureColumn is already transformed by input
      pipeline. For example, `inflow` may have handled transformations.
    feature_columns: A set containing all the feature columns. All items in
      the set should be instances of classes derived from FeatureColumn.
    num_outputs: An integer specifying number of outputs. Default value is 1.
    weight_collections: List of graph collections to which weights are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    A tuple containing:

    * A Tensor which represents predictions of a linear model.
    * A dictionary which maps feature_column to corresponding Variable.
    * A Variable which is used for bias.

  Raises:
    ValueError: if FeatureColumn cannot be used for linear predictions.
  """
  # Copy so that the transformer's inserted entries don't leak to the caller.
  columns_to_tensors = columns_to_tensors.copy()
  check_feature_columns(feature_columns)
  with variable_scope.variable_scope(
      scope,
      default_name='weighted_sum_from_feature_columns',
      values=columns_to_tensors.values()):
    output_tensors = []
    column_to_variable = dict()
    transformer = _Transformer(columns_to_tensors)
    # pylint: disable=protected-access
    # Deterministic order: sort unique columns by key.
    for column in sorted(set(feature_columns), key=lambda x: x.key):
      transformed_tensor = transformer.transform(column)
      try:
        # Sparse columns: per-column embedding weights.
        embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
            transformed_tensor)
        variable, predictions = _create_embedding_lookup(
            column,
            columns_to_tensors,
            embedding_lookup_arguments,
            num_outputs,
            trainable,
            weight_collections)
      except NotImplementedError:
        # Dense columns: convert to a dense tensor and matmul a weight.
        with variable_scope.variable_scope(
            None,
            default_name=column.name,
            values=columns_to_tensors.values()):
          tensor = column._to_dense_tensor(transformed_tensor)
          tensor = _maybe_reshape_input_tensor(
              tensor, column.name, output_rank=2)
          variable = [
              contrib_variables.model_variable(
                  name='weight',
                  shape=[tensor.get_shape()[1], num_outputs],
                  initializer=init_ops.zeros_initializer(),
                  trainable=trainable,
                  collections=weight_collections)
          ]
          predictions = math_ops.matmul(tensor, variable[0], name='matmul')
      except ValueError as ee:
        # Wraps failures from either branch above with the column name.
        raise ValueError('Error creating weighted sum for column: {}.\n'
                         '{}'.format(column.name, ee))
      output_tensors.append(array_ops.reshape(
          predictions, shape=(-1, num_outputs)))
      column_to_variable[column] = variable
      _log_variable(variable)
      fc._maybe_restore_from_checkpoint(column._checkpoint_path(), variable)  # pylint: disable=protected-access
    # pylint: enable=protected-access
    # Final prediction: sum of per-column contributions plus a bias.
    predictions_no_bias = math_ops.add_n(output_tensors)
    bias = contrib_variables.model_variable(
        'bias_weight',
        shape=[num_outputs],
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=_add_variable_collection(weight_collections))
    _log_variable(bias)
    predictions = nn_ops.bias_add(predictions_no_bias, bias)

    return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
                                        feature_columns,
                                        name=None,
                                        example_names=None):
  """Parses tf.Examples to extract tensors for given feature_columns.

  This is a wrapper of 'tf.parse_example'. It parses the serialized protos
  using a feature spec derived from `feature_columns`, then eagerly applies
  every column's transformation so derived columns are materialized in the
  returned dict.

  Example:

  ```python
  columns_to_tensor = parse_feature_columns_from_examples(
      serialized=my_data,
      feature_columns=my_features)

  # Where my_features are:
  # Define features and transformations
  sparse_feature_a = sparse_column_with_keys(
      column_name="sparse_feature_a", keys=["AB", "CD", ...])
  embedding_feature_a = embedding_column(
      sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
  my_features = [embedding_feature_a, ...]
  ```

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.
    name: A name for this operation (optional).
    example_names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos in the batch.

  Returns:
    A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
  """
  check_feature_columns(feature_columns)
  feature_spec = fc.create_feature_spec_for_parsing(feature_columns)
  parsed = parsing_ops.parse_example(
      serialized=serialized,
      features=feature_spec,
      name=name,
      example_names=example_names)

  # Run each transformation once, in deterministic key order; the transformer
  # inserts the derived tensors into `parsed`.
  transformer = _Transformer(parsed)
  for col in sorted(set(feature_columns), key=lambda c: c.key):
    transformer.transform(col)
  return parsed
def transform_features(features, feature_columns):
  """Returns transformed features based on features columns passed in.

  Example:

  ```python
  columns_to_tensor = transform_features(features=features,
                                         feature_columns=feature_columns)

  # Where feature_columns are e.g.:
  sparse_feature_a = sparse_column_with_keys(
      column_name="sparse_feature_a", keys=["AB", "CD", ...])
  embedding_feature_a = embedding_column(
      sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
  real_feature = real_valued_column("real_feature")
  real_feature_buckets = bucketized_column(
      source_column=real_feature, boundaries=[...])
  feature_columns = [embedding_feature_a, real_feature_buckets]
  ```

  Args:
    features: A dictionary of features.
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.

  Returns:
    A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values,
    containing entries only for the requested `feature_columns`.
  """
  columns_to_tensor = features.copy()
  check_feature_columns(feature_columns)
  transformer = _Transformer(columns_to_tensor)
  # Materialize once: FeatureColumns are hashable, so this gives O(1)
  # membership tests below and avoids re-consuming a one-shot iterable.
  unique_columns = set(feature_columns)
  for column in sorted(unique_columns, key=lambda x: x.key):
    transformer.transform(column)
  # Drop base features and intermediate transformations; keep only the
  # tensors for the requested columns.
  for key in list(columns_to_tensor.keys()):
    if key not in unique_columns:
      columns_to_tensor.pop(key)
  return columns_to_tensor
def parse_feature_columns_from_sequence_examples(
    serialized,
    context_feature_columns,
    sequence_feature_columns,
    name=None,
    example_name=None):
  """Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.

  Args:
    serialized: A scalar (0-D Tensor) of type string, a single serialized
      `SequenceExample` proto.
    context_feature_columns: An iterable containing the feature columns for
      context features. All items should be instances of classes derived from
      `_FeatureColumn`. Can be `None`.
    sequence_feature_columns: An iterable containing the feature columns for
      sequence features. All items should be instances of classes derived from
      `_FeatureColumn`. Can be `None`.
    name: A name for this operation (optional).
    example_name: A scalar (0-D Tensor) of type string (optional), the name of
      the serialized proto.

  Returns:
    A tuple consisting of (context_features, sequence_features)

    * context_features: a dict mapping `FeatureColumns` from
      `context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
    * sequence_features: a dict mapping `FeatureColumns` from
      `sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.

  Raises:
    ValueError: if `serialized` cannot be reshaped to a scalar, i.e. it
      holds more than a single serialized `SequenceExample`.
  """
  # Sequence example parsing requires a single (scalar) example.
  try:
    serialized = array_ops.reshape(serialized, [])
  except ValueError as e:
    # Message fixed: was 'must contain as single sequence example'.
    raise ValueError(
        'serialized must contain a single sequence example. Batching must be '
        'done after parsing for sequence examples. Error: {}'.format(e))

  # Treat absent column sets as empty; both are validated before use.
  if context_feature_columns is None:
    context_feature_columns = []
  if sequence_feature_columns is None:
    sequence_feature_columns = []

  check_feature_columns(context_feature_columns)
  context_feature_spec = fc.create_feature_spec_for_parsing(
      context_feature_columns)

  check_feature_columns(sequence_feature_columns)
  sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing(  # pylint: disable=protected-access
      sequence_feature_columns, allow_missing_by_default=False)

  return parsing_ops.parse_single_sequence_example(serialized,
                                                   context_feature_spec,
                                                   sequence_feature_spec,
                                                   example_name,
                                                   name)
def _log_variable(variable):
  """Logs the name and device of each created variable.

  Accepts either a single variable or a list of variables (e.g. the shards
  of a partitioned variable); anything that is not a variable is skipped.
  """
  if isinstance(variable, list):
    for var in variable:
      # BUG FIX: previously tested `fc._is_variable(variable)` (the list),
      # so individual entries were never checked correctly.
      if fc._is_variable(var):  # pylint: disable=protected-access
        logging.info('Created variable %s, with device=%s', var.name,
                     var.device)
  elif fc._is_variable(variable):  # pylint: disable=protected-access
    logging.info('Created variable %s, with device=%s', variable.name,
                 variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
  """Creates a real_valued_column for given tensor and name.

  Args:
    name: column name to assign.
    tensor: a dense `Tensor` with an integer or floating dtype. Its non-batch
      dimensions are flattened into the column's `dimension`.

  Returns:
    A `real_valued_column` matching the tensor's flattened width and dtype.

  Raises:
    ValueError: if `tensor` is a `SparseTensor` or has a non-numeric dtype.
  """
  if isinstance(tensor, sparse_tensor_py.SparseTensor):
    # BUG FIX: placeholders were never formatted (args were passed to
    # ValueError instead of str.format).
    raise ValueError(
        'SparseTensor is not supported for auto detection. Please define '
        'corresponding FeatureColumn for tensor {} {}.'.format(name, tensor))

  if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
    raise ValueError(
        'Non integer or non floating types are not supported for auto '
        'detection. Please define corresponding FeatureColumn for tensor '
        '{} {}.'.format(name, tensor))

  # Flatten all non-batch dimensions into a single width.
  shape = tensor.get_shape().as_list()
  dimension = 1
  for i in range(1, len(shape)):
    dimension *= shape[i]
  return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
  """Infers a `real_valued_column` for every entry in `features`.

  Args:
    features: a dict mapping name to tensor, or a single tensor (which is
      given the empty string as its column name).

  Returns:
    A list of inferred real-valued feature columns.
  """
  if not isinstance(features, dict):
    # Single tensor: infer one unnamed column.
    return [_infer_real_valued_column_for_tensor('', features)]
  return [_infer_real_valued_column_for_tensor(key, value)
          for key, value in features.items()]
def check_feature_columns(feature_columns):
  """Validates a collection of FeatureColumns.

  Args:
    feature_columns: An iterable of instances or subclasses of FeatureColumn.

  Raises:
    ValueError: If `feature_columns` is a dict.
    ValueError: If there are duplicate feature column keys.
  """
  if isinstance(feature_columns, dict):
    raise ValueError('Expected feature_columns to be iterable, found dict.')
  seen_keys = set()
  for column in feature_columns:
    if column.key in seen_keys:
      raise ValueError('Duplicate feature column key found for column: {}. '
                       'This usually means that the column is almost identical '
                       'to another column, and one must be discarded.'.format(
                           column.name))
    seen_keys.add(column.key)
class _Transformer(object):
  """Applies FeatureColumn transformations on demand, with memoization.

  A FeatureColumn specifies how to digest an input column to the network.
  Some feature columns require data transformations; this class performs
  those transformations lazily and caches the result, so a column used in
  several places (e.g. a bucketized feature on its own and inside a cross)
  is transformed exactly once.

  Example:

  ```python
  sparse_feature = sparse_column_with_hash_bucket(...)
  real_valued_feature = real_valued_column(...)
  real_valued_buckets = bucketized_column(source_column=real_valued_feature,
                                          ...)
  sparse_x_real = crossed_column(
      columns=[sparse_feature, real_valued_buckets], hash_bucket_size=10000)

  columns_to_tensor = tf.parse_example(...)
  transformer = Transformer(columns_to_tensor)

  sparse_x_real_tensor = transformer.transform(sparse_x_real)
  sparse_tensor = transformer.transform(sparse_feature)
  real_buckets_tensor = transformer.transform(real_valued_buckets)
  ```
  """

  def __init__(self, columns_to_tensors):
    """Initializes transformer.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have FeatureColumn
        as a key too; that means the FeatureColumn was already transformed by
        the input pipeline. Transformed features are inserted into this dict.
    """
    self._columns_to_tensors = columns_to_tensors

  def transform(self, feature_column):
    """Returns a Tensor which represents given feature_column.

    Args:
      feature_column: An instance of FeatureColumn.

    Returns:
      A Tensor which represents given feature_column. It may create a new
      Tensor or re-use an existing one.

    Raises:
      ValueError: if FeatureColumn cannot be handled by this Transformer.
    """
    logging.debug('Transforming feature_column %s', feature_column)
    cache = self._columns_to_tensors
    if feature_column not in cache:
      # Not seen before: let the column insert its transformed tensor(s).
      feature_column.insert_transformed_feature(cache)
      if feature_column not in cache:
        # The column failed to register itself.
        raise ValueError('Column {} is not supported.'.format(
            feature_column.name))
    return cache[feature_column]
def _add_variable_collection(weight_collections):
  """Ensures GLOBAL_VARIABLES is among the weight collections, if any.

  Falsy inputs (None, empty list) are returned unchanged; otherwise returns
  a de-duplicated list including `GraphKeys.GLOBAL_VARIABLES`.
  """
  if not weight_collections:
    return weight_collections
  merged = set(weight_collections)
  merged.add(ops.GraphKeys.GLOBAL_VARIABLES)
  return list(merged)
# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
# Column types accepted directly by sequence_input_from_feature_columns.
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
                               fc._EmbeddingColumn,
                               fc._RealValuedColumn,
                               fc._RealValuedVarLenColumn)

# Column types that may not appear anywhere in a sequence column's ancestry
# (checked recursively by _check_forbidden_sequence_columns).
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._ScatteredEmbeddingColumn,
                               fc._BucketizedColumn,
                               fc._CrossedColumn)
def _check_supported_sequence_columns(feature_columns):
  """Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
  for column in feature_columns:
    if isinstance(column, _SUPPORTED_SEQUENCE_COLUMNS):
      continue
    raise ValueError(
        'FeatureColumn type {} is not currently supported for sequence data.'.
        format(type(column).__name__))
def _get_parent_columns(feature_column):
  """Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
  # Columns wrapping a single sparse id column.
  if isinstance(feature_column, (fc._WeightedSparseColumn,
                                 fc._OneHotColumn,
                                 fc._EmbeddingColumn)):
    return (feature_column.sparse_id_column,)
  # Bucketization wraps a single source column.
  if isinstance(feature_column, fc._BucketizedColumn):
    return (feature_column.source_column,)
  # Crosses depend on every crossed column.
  if isinstance(feature_column, fc._CrossedColumn):
    return tuple(feature_column.columns)
  # Leaf column: no parents.
  return ()
def _gather_feature_columns(feature_columns):
  """Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
  # Breadth-first walk over the parent relation; `gathered` doubles as the
  # work queue and the visited set.
  gathered = list(feature_columns)
  index = 0
  while index < len(gathered):
    current = gathered[index]
    index += 1
    for parent in _get_parent_columns(current):
      if parent not in gathered:
        gathered.append(parent)
  return gathered
def _check_forbidden_sequence_columns(feature_columns):
  """Recursively checks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
  # Walk the full ancestry so that e.g. an embedding over a crossed column
  # is also rejected.
  for column in _gather_feature_columns(feature_columns):
    if isinstance(column, _FORBIDDEN_SEQUENCE_COLUMNS):
      raise ValueError(
          'Column {} is of type {}, which is not currently supported for '
          'sequences.'.format(column.name,
                              type(column).__name__))
| apache-2.0 |
dan1/horizon-proto | horizon/tables/base.py | 8 | 70786 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
import logging
from operator import attrgetter
import sys
from django.core import exceptions as core_exceptions
from django.core import urlresolvers
from django import forms
from django.http import HttpResponse # noqa
from django import template
from django.template.defaultfilters import slugify # noqa
from django.template.defaultfilters import truncatechars # noqa
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.html import escape
from django.utils import http
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils import termcolors
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon.tables.actions import FilterAction # noqa
from horizon.tables.actions import LinkAction # noqa
from horizon.utils import html
# Module-level logger for the tables framework.
LOG = logging.getLogger(__name__)
# Terminal color palette used by Django's termcolors utilities.
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
# NOTE(review): appears to be the separator for composed parameter names
# (e.g. "table__action") — confirm against callers elsewhere in the file.
STRING_SEPARATOR = "__"
class Column(html.HTMLElement):
"""A class which represents a single column in a :class:`.DataTable`.
.. attribute:: transform
A string or callable. If ``transform`` is a string, it should be the
name of the attribute on the underlying data class which
should be displayed in this column. If it is a callable, it
will be passed the current row's data at render-time and should
return the contents of the cell. Required.
.. attribute:: verbose_name
The name for this column which should be used for display purposes.
Defaults to the value of ``transform`` with the first letter
of each word capitalized if the ``transform`` is not callable,
otherwise it defaults to an empty string (``""``).
.. attribute:: sortable
Boolean to determine whether this column should be sortable or not.
Defaults to ``True``.
.. attribute:: hidden
Boolean to determine whether or not this column should be displayed
when rendering the table. Default: ``False``.
.. attribute:: link
A string or callable which returns a URL which will be wrapped around
this column's text as a link.
.. attribute:: allowed_data_types
A list of data types for which the link should be created.
Default is an empty list (``[]``).
When the list is empty and the ``link`` attribute is not None, all the
rows under this column will be links.
.. attribute:: status
Boolean designating whether or not this column represents a status
(i.e. "enabled/disabled", "up/down", "active/inactive").
Default: ``False``.
.. attribute:: status_choices
A tuple of tuples representing the possible data values for the
status column and their associated boolean equivalent. Positive
states should equate to ``True``, negative states should equate
to ``False``, and indeterminate states should be ``None``.
Values are compared in a case-insensitive manner.
Example (these are also the default values)::
status_choices = (
('enabled', True),
('true', True),
('up', True),
('active', True),
('yes', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('no', False),
('off', False),
)
.. attribute:: display_choices
A tuple of tuples representing the possible values to substitute
the data when displayed in the column cell.
.. attribute:: empty_value
A string or callable to be used for cells which have no data.
Defaults to the string ``"-"``.
.. attribute:: summation
A string containing the name of a summation method to be used in
the generation of a summary row for this column. By default the
options are ``"sum"`` or ``"average"``, which behave as expected.
Optional.
.. attribute:: filters
A list of functions (often template filters) to be applied to the
value of the data for this column prior to output. This is effectively
a shortcut for writing a custom ``transform`` function in simple cases.
.. attribute:: classes
An iterable of CSS classes which should be added to this column.
Example: ``classes=('foo', 'bar')``.
.. attribute:: attrs
A dict of HTML attribute strings which should be added to this column.
Example: ``attrs={"data-foo": "bar"}``.
.. attribute:: cell_attributes_getter
A callable to get the HTML attributes of a column cell depending
on the data. For example, to add additional description or help
information for data in a column cell (e.g. in Images panel, for the
column 'format'):
helpText = {
'ARI':'Amazon Ramdisk Image'
'QCOW2': 'QEMU Emulator'
}
getHoverHelp(data):
text = helpText.get(data, None)
if text:
return {'title': text}
else:
return {}
...
...
cell_attributes_getter = getHoverHelp
.. attribute:: truncate
An integer for the maximum length of the string in this column. If the
length of the data in this column is larger than the supplied number,
the data for this column will be truncated and an ellipsis will be
appended to the truncated data.
Defaults to ``None``.
.. attribute:: link_classes
An iterable of CSS classes which will be added when the column's text
is displayed as a link.
This is left for backward compatibility. Deprecated in favor of the
link_attributes attribute.
Example: ``link_classes=('link-foo', 'link-bar')``.
Defaults to ``None``.
.. attribute:: wrap_list
Boolean value indicating whether the contents of this cell should be
wrapped in a ``<ul></ul>`` tag. Useful in conjunction with Django's
``unordered_list`` template filter. Defaults to ``False``.
.. attribute:: form_field
A form field used for inline editing of the column. A django
forms.Field can be used or django form.Widget can be used.
Example: ``form_field=forms.CharField(required=True)``.
Defaults to ``None``.
.. attribute:: form_field_attributes
The additional html attributes that will be rendered to form_field.
Example: ``form_field_attributes={'class': 'bold_input_field'}``.
Defaults to ``None``.
.. attribute:: update_action
The class that inherits from tables.actions.UpdateAction, update_cell
method takes care of saving inline edited data. The tables.base.Row
get_data method needs to be connected to table for obtaining the data.
Example: ``update_action=UpdateCell``.
Defaults to ``None``.
.. attribute:: link_attrs
A dict of HTML attribute strings which should be added when the
column's text is displayed as a link.
Examples:
``link_attrs={"data-foo": "bar"}``.
``link_attrs={"target": "_blank", "class": "link-foo link-bar"}``.
Defaults to ``None``.
.. attribute:: help_text
A string of simple help text displayed in a tooltip when you hover
over the help icon beside the Column name. Defaults to ``None``.
"""
summation_methods = {
"sum": sum,
"average": lambda data: sum(data, 0.0) / len(data)
}
# Used to retain order when instantiating columns on a table
creation_counter = 0
transform = None
name = None
verbose_name = None
status_choices = (
('enabled', True),
('true', True),
('up', True),
('yes', True),
('active', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('no', False),
('off', False),
)
def __init__(self, transform, verbose_name=None, sortable=True,
link=None, allowed_data_types=[], hidden=False, attrs=None,
status=False, status_choices=None, display_choices=None,
empty_value=None, filters=None, classes=None, summation=None,
auto=None, truncate=None, link_classes=None, wrap_list=False,
form_field=None, form_field_attributes=None,
update_action=None, link_attrs=None,
cell_attributes_getter=None, help_text=None):
self.classes = list(classes or getattr(self, "classes", []))
super(Column, self).__init__()
self.attrs.update(attrs or {})
if callable(transform):
self.transform = transform
self.name = "<%s callable>" % transform.__name__
else:
self.transform = six.text_type(transform)
self.name = self.transform
# Empty string is a valid value for verbose_name
if verbose_name is None:
if callable(transform):
self.verbose_name = ''
else:
self.verbose_name = self.transform.title()
else:
self.verbose_name = verbose_name
self.auto = auto
self.sortable = sortable
self.link = link
self.allowed_data_types = allowed_data_types
self.hidden = hidden
self.status = status
self.empty_value = empty_value or _('-')
self.filters = filters or []
self.truncate = truncate
self.wrap_list = wrap_list
self.form_field = form_field
self.form_field_attributes = form_field_attributes or {}
self.update_action = update_action
self.link_attrs = link_attrs or {}
self.help_text = help_text
if link_classes:
self.link_attrs['class'] = ' '.join(link_classes)
self.cell_attributes_getter = cell_attributes_getter
if status_choices:
self.status_choices = status_choices
self.display_choices = display_choices
if summation is not None and summation not in self.summation_methods:
raise ValueError("Summation method %s must be one of %s."
% (summation,
", ".join(self.summation_methods.keys())))
self.summation = summation
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
if self.sortable and not self.auto:
self.classes.append("sortable")
if self.hidden:
self.classes.append("hide")
if self.link is not None:
self.classes.append('anchor')
    def __unicode__(self):
        # Python 2 text representation: the column's verbose name.
        return six.text_type(self.verbose_name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
def get_raw_data(self, datum):
"""Returns the raw data for this column, before any filters or
formatting are applied to it. This is useful when doing calculations
on data in the table.
"""
# Callable transformations
if callable(self.transform):
data = self.transform(datum)
# Dict lookups
elif isinstance(datum, collections.Mapping) and \
self.transform in datum:
data = datum.get(self.transform)
else:
# Basic object lookups
try:
data = getattr(datum, self.transform)
except AttributeError:
msg = _("The attribute %(attr)s doesn't exist on "
"%(obj)s.") % {'attr': self.transform, 'obj': datum}
msg = termcolors.colorize(msg, **PALETTE['ERROR'])
LOG.warning(msg)
data = None
return data
def get_data(self, datum):
"""Returns the final display data for this column from the given
inputs.
The return value will be either the attribute specified for this column
or the return value of the attr:`~horizon.tables.Column.transform`
method for this column.
"""
datum_id = self.table.get_object_id(datum)
if datum_id in self.table._data_cache[self]:
return self.table._data_cache[self][datum_id]
data = self.get_raw_data(datum)
display_value = None
if self.display_choices:
display_value = [display for (value, display) in
self.display_choices
if value.lower() == (data or '').lower()]
if display_value:
data = display_value[0]
else:
for filter_func in self.filters:
try:
data = filter_func(data)
except Exception:
msg = ("Filter '%(filter)s' failed with data "
"'%(data)s' on column '%(col_name)s'")
args = {'filter': filter_func.func_name,
'data': data,
'col_name': six.text_type(self.verbose_name)}
LOG.warning(msg, args)
if data and self.truncate:
data = truncatechars(data, self.truncate)
self.table._data_cache[self][datum_id] = data
return self.table._data_cache[self][datum_id]
def get_link_url(self, datum):
"""Returns the final value for the column's ``link`` property.
If ``allowed_data_types`` of this column is not empty and the datum
has an assigned type, check if the datum's type is in the
``allowed_data_types`` list. If not, the datum won't be displayed
as a link.
If ``link`` is a callable, it will be passed the current data object
and should return a URL. Otherwise ``get_link_url`` will attempt to
call ``reverse`` on ``link`` with the object's id as a parameter.
Failing that, it will simply return the value of ``link``.
"""
if self.allowed_data_types:
data_type_name = self.table._meta.data_type_name
data_type = getattr(datum, data_type_name, None)
if data_type and (data_type not in self.allowed_data_types):
return None
obj_id = self.table.get_object_id(datum)
if callable(self.link):
return self.link(datum)
try:
return urlresolvers.reverse(self.link, args=(obj_id,))
except urlresolvers.NoReverseMatch:
return self.link
def get_summation(self):
"""Returns the summary value for the data in this column if a
valid summation method is specified for it. Otherwise returns ``None``.
"""
if self.summation not in self.summation_methods:
return None
summation_function = self.summation_methods[self.summation]
data = [self.get_raw_data(datum) for datum in self.table.data]
data = filter(lambda datum: datum is not None, data)
if len(data):
try:
summation = summation_function(data)
for filter_func in self.filters:
summation = filter_func(summation)
return summation
except TypeError:
pass
return None
class Row(html.HTMLElement):
    """Represents a row in the table.
    When iterated, the ``Row`` instance will yield each of its cells.
    Rows are capable of AJAX updating, with a little added work:
    The ``ajax`` property needs to be set to ``True``, and
    subclasses need to define a ``get_data`` method which returns a data
    object appropriate for consumption by the table (effectively the "get"
    lookup versus the table's "list" lookup).
    The automatic update interval is configurable by setting the key
    ``ajax_poll_interval`` in the ``HORIZON_CONFIG`` dictionary.
    Default: ``2500`` (measured in milliseconds).
    .. attribute:: table
        The table which this row belongs to.
    .. attribute:: datum
        The data object which this row represents.
    .. attribute:: id
        A string uniquely representing this row composed of the table name
        and the row data object's identifier.
    .. attribute:: cells
        The cells belonging to this row stored in a ``SortedDict`` object.
        This attribute is populated during instantiation.
    .. attribute:: status
        Boolean value representing the status of this row calculated from
        the values of the table's ``status_columns`` if they are set.
    .. attribute:: status_class
        Returns a css class for the status of the row based on ``status``.
    .. attribute:: ajax
        Boolean value to determine whether ajax updating for this row is
        enabled.
    .. attribute:: ajax_action_name
        String that is used for the query parameter key to request AJAX
        updates. Generally you won't need to change this value.
        Default: ``"row_update"``.
    .. attribute:: ajax_cell_action_name
        String that is used for the query parameter key to request AJAX
        updates of cell. Generally you won't need to change this value.
        It is also used for inline edit of the cell.
        Default: ``"cell_update"``.
    """
    # Class-level defaults; subclasses opt in to AJAX updating by setting
    # ``ajax = True`` and implementing get_data().
    ajax = False
    ajax_action_name = "row_update"
    ajax_cell_action_name = "cell_update"
    def __init__(self, table, datum=None):
        super(Row, self).__init__()
        self.table = table
        self.datum = datum
        self.selected = False
        # Cells can only be built once the datum is known; without one the
        # row stays empty until load_cells() is called explicitly.
        if self.datum:
            self.load_cells()
        else:
            self.id = None
            self.cells = []
    def load_cells(self, datum=None):
        """Load the row's data (either provided at initialization or as an
        argument to this function), initialize all the cells contained
        by this row, and set the appropriate row properties which require
        the row's data to be determined.
        This function is called automatically by
        :meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is
        provided. However, by not providing the data during initialization
        this function allows for the possibility of a two-step loading
        pattern when you need a row instance but don't yet have the data
        available.
        """
        # Compile all the cells on instantiation.
        table = self.table
        if datum:
            self.datum = datum
        else:
            datum = self.datum
        cells = []
        # One cell per column, keyed by column name (or the auto marker for
        # auto-generated columns), in the table's declared column order.
        for column in table.columns.values():
            cell = table._meta.cell_class(datum, column, self)
            cells.append((column.name or column.auto, cell))
        self.cells = SortedDict(cells)
        if self.ajax:
            # Wire up the data-* attributes the client-side poller reads.
            interval = conf.HORIZON_CONFIG['ajax_poll_interval']
            self.attrs['data-update-interval'] = interval
            self.attrs['data-update-url'] = self.get_ajax_update_url()
            self.classes.append("ajax-update")
        self.attrs['data-object-id'] = table.get_object_id(datum)
        # Add the row's status class and id to the attributes to be rendered.
        self.classes.append(self.status_class)
        id_vals = {"table": self.table.name,
                   "sep": STRING_SEPARATOR,
                   "id": table.get_object_id(datum)}
        self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
        self.attrs['id'] = self.id
        # Add the row's display name if available
        display_name = table.get_object_display(datum)
        if display_name:
            self.attrs['data-display'] = escape(display_name)
    def __repr__(self):
        # Debug representation: class name plus the composed row id.
        return '<%s: %s>' % (self.__class__.__name__, self.id)
    def __iter__(self):
        # Iterating a row yields its cells in column order.
        return iter(self.cells.values())
    @property
    def status(self):
        # Aggregate status over the table's declared status columns;
        # implicitly returns None when no status columns are configured.
        column_names = self.table._meta.status_columns
        if column_names:
            statuses = dict([(column_name, self.cells[column_name].status) for
                             column_name in column_names])
            return self.table.calculate_row_status(statuses)
    @property
    def status_class(self):
        # CSS class derived from the aggregate status, or '' when the
        # table has no status columns.
        column_names = self.table._meta.status_columns
        if column_names:
            return self.table.get_row_status_class(self.status)
        else:
            return ''
    def render(self):
        # Render this row with the shared row template.
        return render_to_string("horizon/common/_data_table_row.html",
                                {"row": self})
    def get_cells(self):
        """Returns the bound cells for this row in order."""
        return self.cells.values()
    def get_ajax_update_url(self):
        # URL polled by the client to refresh this row via AJAX.
        table_url = self.table.get_absolute_url()
        params = urlencode(SortedDict([
            ("action", self.ajax_action_name),
            ("table", self.table.name),
            ("obj_id", self.table.get_object_id(self.datum))
        ]))
        return "%s?%s" % (table_url, params)
    def can_be_selected(self, datum):
        """By default if multiselect enabled return True. You can remove the
        checkbox after an ajax update here if required.
        """
        return True
    def get_data(self, request, obj_id):
        """Fetches the updated data for the row based on the object id
        passed in. Must be implemented by a subclass to allow AJAX updating.
        """
        # Base implementation returns an empty mapping; AJAX-enabled
        # subclasses must override this.
        return {}
class Cell(html.HTMLElement):
    """Represents a single cell in the table."""
    def __init__(self, datum, column, row, attrs=None, classes=None):
        self.classes = classes or getattr(self, "classes", [])
        super(Cell, self).__init__()
        self.attrs.update(attrs or {})
        self.datum = datum
        self.column = column
        self.row = row
        self.wrap_list = column.wrap_list
        self.inline_edit_available = self.column.update_action is not None
        # initialize the update action if available
        if self.inline_edit_available:
            self.update_action = self.column.update_action()
            self.attrs['data-cell-name'] = column.name
            self.attrs['data-update-url'] = self.get_ajax_update_url()
        self.inline_edit_mod = False
        # add tooltip to cells if the truncate variable is set
        if column.truncate:
            # NOTE(review): the tooltip reads the raw attribute named
            # ``column.name`` rather than the transformed value — confirm
            # this is intended for columns with a callable transform.
            data = getattr(datum, column.name, '') or ''
            if len(data) > column.truncate:
                self.attrs['data-toggle'] = 'tooltip'
                self.attrs['title'] = data
        self.data = self.get_data(datum, column, row)
    def get_data(self, datum, column, row):
        """Fetches the data to be displayed in this cell."""
        table = row.table
        # Auto-generated columns render widgets/actions; plain columns
        # delegate to Column.get_data. Rendered markup is memoized in the
        # table's per-column _data_cache.
        if column.auto == "multi_select":
            data = ""
            if row.can_be_selected(datum):
                widget = forms.CheckboxInput(check_test=lambda value: False)
                # Convert value to string to avoid accidental type conversion
                data = widget.render('object_ids',
                                     six.text_type(table.get_object_id(datum)),
                                     {'class': 'table-row-multi-select'})
            table._data_cache[column][table.get_object_id(datum)] = data
        elif column.auto == "form_field":
            widget = column.form_field
            # A Field was supplied instead of a Widget; unwrap it.
            if issubclass(widget.__class__, forms.Field):
                widget = widget.widget
            widget_name = "%s__%s" % \
                (column.name,
                 six.text_type(table.get_object_id(datum)))
            # Create local copy of attributes, so it don't change column
            # class form_field_attributes
            form_field_attributes = {}
            form_field_attributes.update(column.form_field_attributes)
            # Adding id of the input so it pairs with label correctly
            form_field_attributes['id'] = widget_name
            # Columns filtered through ``urlize`` get the raw (unfiltered)
            # value so the widget doesn't receive rendered anchor markup.
            if template.defaultfilters.urlize in column.filters:
                data = widget.render(widget_name,
                                     column.get_raw_data(datum),
                                     form_field_attributes)
            else:
                data = widget.render(widget_name,
                                     column.get_data(datum),
                                     form_field_attributes)
            table._data_cache[column][table.get_object_id(datum)] = data
        elif column.auto == "actions":
            data = table.render_row_actions(datum, pull_right=False)
            table._data_cache[column][table.get_object_id(datum)] = data
        else:
            data = column.get_data(datum)
            if column.cell_attributes_getter:
                cell_attributes = column.cell_attributes_getter(data) or {}
                self.attrs.update(cell_attributes)
        return data
    def __repr__(self):
        # Debug representation: class name, column name, and row id.
        return '<%s: %s, %s>' % (self.__class__.__name__,
                                 self.column.name,
                                 self.row.id)
    @property
    def id(self):
        # DOM id: "<column name>__<object id>".
        return ("%s__%s" % (self.column.name,
                six.text_type(self.row.table.get_object_id(self.datum))))
    @property
    def value(self):
        """Returns a formatted version of the data for final output.
        This takes into consideration the
        :attr:`~horizon.tables.Column.link`` and
        :attr:`~horizon.tables.Column.empty_value`
        attributes.
        """
        try:
            data = self.column.get_data(self.datum)
            if data is None:
                # empty_value may be a callable taking the datum, or a
                # plain placeholder value.
                if callable(self.column.empty_value):
                    data = self.column.empty_value(self.datum)
                else:
                    data = self.column.empty_value
        except Exception:
            data = None
            exc_info = sys.exc_info()
            # NOTE(review): six.reraise() itself raises and never returns,
            # so the leading ``raise`` here is never executed.
            raise six.reraise(template.TemplateSyntaxError, exc_info[1],
                              exc_info[2])
        # form_field cells render a widget, so they are never wrapped in a
        # link even when a URL is available.
        if self.url and not self.column.auto == "form_field":
            link_attrs = ' '.join(['%s="%s"' % (k, v) for (k, v) in
                                   self.column.link_attrs.items()])
            # Escape the data inside while allowing our HTML to render
            data = mark_safe('<a href="%s" %s>%s</a>' % (
                             (escape(self.url),
                              link_attrs,
                              escape(six.text_type(data)))))
        return data
    @property
    def url(self):
        # Resolved link target for this cell, or None when the column has
        # no link (or the link resolves to nothing).
        if self.column.link:
            url = self.column.get_link_url(self.datum)
            if url:
                return url
        else:
            return None
    @property
    def status(self):
        """Gets the status for the column based on the cell's data."""
        # Deal with status column mechanics based in this cell's data
        if hasattr(self, '_status'):
            return self._status
        if self.column.status or \
                self.column.name in self.column.table._meta.status_columns:
            # returns the first matching status found
            data_status_lower = six.text_type(
                self.column.get_raw_data(self.datum)).lower()
            for status_name, status_value in self.column.status_choices:
                if six.text_type(status_name).lower() == data_status_lower:
                    self._status = status_value
                    return self._status
        # No match (or not a status column): cache and report "unknown".
        self._status = None
        return self._status
    def get_status_class(self, status):
        """Returns a css class name determined by the status value."""
        if status is True:
            return "status_up"
        elif status is False:
            return "status_down"
        else:
            return "status_unknown"
    def get_default_classes(self):
        """Returns a flattened string of the cell's CSS classes."""
        # Cells without a link shouldn't carry the column's 'anchor' class.
        if not self.url:
            self.column.classes = [cls for cls in self.column.classes
                                   if cls != "anchor"]
        column_class_string = self.column.get_final_attrs().get('class', "")
        classes = set(column_class_string.split(" "))
        if self.column.status:
            classes.add(self.get_status_class(self.status))
        if self.inline_edit_available:
            classes.add("inline_edit_available")
        return list(classes)
    def get_ajax_update_url(self):
        # URL used both for AJAX refresh and inline edit of this cell.
        column = self.column
        table_url = column.table.get_absolute_url()
        params = urlencode(SortedDict([
            ("action", self.row.ajax_cell_action_name),
            ("table", column.table.name),
            ("cell_name", column.name),
            ("obj_id", column.table.get_object_id(self.datum))
        ]))
        return "%s?%s" % (table_url, params)
    @property
    def update_allowed(self):
        """Determines whether update of given cell is allowed.
        Calls allowed action of defined UpdateAction of the Column.
        """
        return self.update_action.allowed(self.column.table.request,
                                          self.datum,
                                          self)
    def render(self):
        # Render this cell with the shared cell template.
        return render_to_string("horizon/common/_data_table_cell.html",
                                {"cell": self})
class DataTableOptions(object):
    """Contains options for :class:`.DataTable` objects.
    .. attribute:: name
        A short name or slug for the table.
    .. attribute:: verbose_name
        A more verbose name for the table meant for display purposes.
    .. attribute:: columns
        A list of column objects or column names. Controls ordering/display
        of the columns in the table.
    .. attribute:: table_actions
        A list of action classes derived from the
        :class:`~horizon.tables.Action` class. These actions will handle tasks
        such as bulk deletion, etc. for multiple objects at once.
    .. attribute:: table_actions_menu
        A list of action classes similar to ``table_actions`` except these
        will be displayed in a menu instead of as individual buttons. Actions
        from this list will take precedence over actions from the
        ``table_actions`` list.
    .. attribute:: row_actions
        A list similar to ``table_actions`` except tailored to appear for
        each row. These actions act on a single object at a time.
    .. attribute:: actions_column
        Boolean value to control rendering of an additional column containing
        the various actions for each row. Defaults to ``True`` if any actions
        are specified in the ``row_actions`` option.
    .. attribute:: multi_select
        Boolean value to control rendering of an extra column with checkboxes
        for selecting multiple objects in the table. Defaults to ``True`` if
        any actions are specified in the ``table_actions`` option.
    .. attribute:: filter
        Boolean value to control the display of the "filter" search box
        in the table actions. By default it checks whether or not an instance
        of :class:`.FilterAction` is in :attr:`.table_actions`.
    .. attribute:: template
        String containing the template which should be used to render the
        table. Defaults to ``"horizon/common/_data_table.html"``.
    .. attribute:: context_var_name
        The name of the context variable which will contain the table when
        it is rendered. Defaults to ``"table"``.
    .. attribute:: prev_pagination_param
        The name of the query string parameter which will be used when
        paginating backward in this table. When using multiple tables in a
        single view this will need to be changed to differentiate between the
        tables. Default: ``"prev_marker"``.
    .. attribute:: pagination_param
        The name of the query string parameter which will be used when
        paginating forward in this table. When using multiple tables in a
        single view this will need to be changed to differentiate between the
        tables. Default: ``"marker"``.
    .. attribute:: status_columns
        A list or tuple of column names which represents the "state"
        of the data object being represented.
        If ``status_columns`` is set, when the rows are rendered the value
        of this column will be used to add an extra class to the row in
        the form of ``"status_up"`` or ``"status_down"`` for that row's
        data.
        The row status is used by other Horizon components to trigger tasks
        such as dynamic AJAX updating.
    .. attribute:: cell_class
        The class which should be used for rendering the cells of this table.
        Optional. Default: :class:`~horizon.tables.Cell`.
    .. attribute:: row_class
        The class which should be used for rendering the rows of this table.
        Optional. Default: :class:`~horizon.tables.Row`.
    .. attribute:: column_class
        The class which should be used for handling the columns of this table.
        Optional. Default: :class:`~horizon.tables.Column`.
    .. attribute:: css_classes
        A custom CSS class or classes to add to the ``<table>`` tag of the
        rendered table, for when the particular table requires special styling.
        Default: ``""``.
    .. attribute:: mixed_data_type
        A toggle to indicate if the table accepts two or more types of data.
        Optional. Default: ``False``
    .. attribute:: data_types
        A list of data types that this table would accept. Defaults to an
        empty list, but if the attribute ``mixed_data_type`` is set to
        ``True``, then this list must contain more than one element.
    .. attribute:: data_type_name
        The name of an attribute to assign to data passed to the table when it
        accepts mix data. Default: ``"_table_data_type"``
    .. attribute:: footer
        Boolean to control whether or not to show the table's footer.
        Default: ``True``.
    .. attribute:: hidden_title
        Boolean to control whether or not to show the table's title.
        Default: ``True``.
    .. attribute:: permissions
        A list of permission names which this table requires in order to be
        displayed. Defaults to an empty list (``[]``).
    """
    def __init__(self, options):
        """Read attributes off the table's ``Meta`` class (``options``),
        falling back to the documented defaults.
        """
        self.name = getattr(options, 'name', self.__class__.__name__)
        verbose_name = (getattr(options, 'verbose_name', None)
                        or self.name.title())
        self.verbose_name = verbose_name
        self.columns = getattr(options, 'columns', None)
        self.status_columns = getattr(options, 'status_columns', [])
        self.table_actions = getattr(options, 'table_actions', [])
        self.row_actions = getattr(options, 'row_actions', [])
        self.table_actions_menu = getattr(options, 'table_actions_menu', [])
        self.cell_class = getattr(options, 'cell_class', Cell)
        self.row_class = getattr(options, 'row_class', Row)
        self.column_class = getattr(options, 'column_class', Column)
        self.css_classes = getattr(options, 'css_classes', '')
        self.prev_pagination_param = getattr(options,
                                             'prev_pagination_param',
                                             'prev_marker')
        self.pagination_param = getattr(options, 'pagination_param', 'marker')
        self.browser_table = getattr(options, 'browser_table', None)
        self.footer = getattr(options, 'footer', True)
        self.hidden_title = getattr(options, 'hidden_title', True)
        self.no_data_message = getattr(options,
                                       "no_data_message",
                                       _("No items to display."))
        self.permissions = getattr(options, 'permissions', [])
        # Set self.filter if we have any FilterActions
        filter_actions = [action for action in self.table_actions if
                          issubclass(action, FilterAction)]
        if len(filter_actions) > 1:
            raise NotImplementedError("Multiple filter actions is not "
                                      "currently supported.")
        self.filter = getattr(options, 'filter', len(filter_actions) > 0)
        if len(filter_actions) == 1:
            self._filter_action = filter_actions.pop()
        else:
            self._filter_action = None
        self.template = getattr(options,
                                'template',
                                'horizon/common/_data_table.html')
        self.row_actions_dropdown_template = ('horizon/common/_data_table_'
                                              'row_actions_dropdown.html')
        self.row_actions_row_template = ('horizon/common/_data_table_'
                                         'row_actions_row.html')
        self.table_actions_template = \
            'horizon/common/_data_table_table_actions.html'
        self.context_var_name = six.text_type(getattr(options,
                                                      'context_var_name',
                                                      'table'))
        # Auto-generated columns default to "on" whenever the corresponding
        # actions are declared.
        self.actions_column = getattr(options,
                                      'actions_column',
                                      len(self.row_actions) > 0)
        self.multi_select = getattr(options,
                                    'multi_select',
                                    len(self.table_actions) > 0)
        # Set runtime table defaults; not configurable.
        self.has_prev_data = False
        self.has_more_data = False
        # Set mixed data type table attr
        self.mixed_data_type = getattr(options, 'mixed_data_type', False)
        self.data_types = getattr(options, 'data_types', [])
        # If data_types has more than one element, set mixed_data_type
        # to True automatically.
        if len(self.data_types) > 1:
            self.mixed_data_type = True
        # However, if mixed_data_type was set to True manually and
        # data_types does not list more than one type, raise an error.
        if self.mixed_data_type and len(self.data_types) <= 1:
            # Grammar of this message fixed ("should has ... types").
            raise ValueError("If mixed_data_type is set to True in class %s, "
                             "data_types should have more than one type" %
                             self.name)
        self.data_type_name = getattr(options,
                                      'data_type_name',
                                      "_table_data_type")
class DataTableMetaclass(type):
    """Metaclass to add options to DataTable class and collect columns."""
    def __new__(mcs, name, bases, attrs):
        # Process options from Meta
        class_name = name
        attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))
        # Gather columns; this prevents the column from being an attribute
        # on the DataTable class and avoids naming conflicts.
        columns = []
        # Iterate over a snapshot: on Python 3, popping from ``attrs``
        # while iterating the live items() view raises RuntimeError.
        for attr_name, obj in list(attrs.items()):
            if issubclass(type(obj), (opts.column_class, Column)):
                column_instance = attrs.pop(attr_name)
                column_instance.name = attr_name
                column_instance.classes.append('normal_column')
                columns.append((attr_name, column_instance))
        columns.sort(key=lambda x: x[1].creation_counter)
        # Iterate in reverse to preserve final order
        for base in bases[::-1]:
            if hasattr(base, 'base_columns'):
                # list() is required on Python 3, where items() returns a
                # view that does not support '+' concatenation.
                columns = list(base.base_columns.items()) + columns
        attrs['base_columns'] = SortedDict(columns)
        # If the table is in a ResourceBrowser, the column number must meet
        # these limits because of the width of the browser.
        if opts.browser_table == "navigation" and len(columns) > 3:
            raise ValueError("You can only assign three column to %s."
                             % class_name)
        if opts.browser_table == "content" and len(columns) > 2:
            raise ValueError("You can only assign two columns to %s."
                             % class_name)
        if opts.columns:
            # Remove any columns that weren't declared if we're being explicit
            # NOTE: we're iterating a COPY of the list here!
            for column_data in columns[:]:
                if column_data[0] not in opts.columns:
                    columns.pop(columns.index(column_data))
            # Re-order based on declared columns
            columns.sort(key=lambda x: attrs['_meta'].columns.index(x[0]))
        # Add in our auto-generated columns
        if opts.multi_select and opts.browser_table != "navigation":
            multi_select = opts.column_class("multi_select",
                                             verbose_name="",
                                             auto="multi_select")
            multi_select.classes.append('multi_select_column')
            columns.insert(0, ("multi_select", multi_select))
        if opts.actions_column:
            actions_column = opts.column_class("actions",
                                               verbose_name=_("Actions"),
                                               auto="actions")
            actions_column.classes.append('actions_column')
            columns.append(("actions", actions_column))
        # Store this set of columns internally so we can copy them per-instance
        attrs['_columns'] = SortedDict(columns)
        # Gather and register actions for later access since we only want
        # to instantiate them once.
        # (list() call gives deterministic sort order, which sets don't have.)
        actions = list(set(opts.row_actions) | set(opts.table_actions) |
                       set(opts.table_actions_menu))
        actions.sort(key=attrgetter('name'))
        actions_dict = SortedDict([(action.name, action())
                                   for action in actions])
        attrs['base_actions'] = actions_dict
        if opts._filter_action:
            # Replace our filter action with the instantiated version
            opts._filter_action = actions_dict[opts._filter_action.name]
        # Create our new class!
        return type.__new__(mcs, name, bases, attrs)
@six.add_metaclass(DataTableMetaclass)
class DataTable(object):
"""A class which defines a table with all data and associated actions.
.. attribute:: name
String. Read-only access to the name specified in the
table's Meta options.
.. attribute:: multi_select
Boolean. Read-only access to whether or not this table
should display a column for multi-select checkboxes.
.. attribute:: data
Read-only access to the data this table represents.
.. attribute:: filtered_data
Read-only access to the data this table represents, filtered by
the :meth:`~horizon.tables.FilterAction.filter` method of the table's
:class:`~horizon.tables.FilterAction` class (if one is provided)
using the current request's query parameters.
"""
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
self.request = request
self.data = data
self.kwargs = kwargs
self._needs_form_wrapper = needs_form_wrapper
self._no_data_message = self._meta.no_data_message
self.breadcrumb = None
self.current_item_id = None
self.permissions = self._meta.permissions
# Create a new set
columns = []
for key, _column in self._columns.items():
column = copy.copy(_column)
column.table = self
columns.append((key, column))
self.columns = SortedDict(columns)
self._populate_data_cache()
# Associate these actions with this table
for action in self.base_actions.values():
action.associate_with_table(self)
self.needs_summary_row = any([col.summation
for col in self.columns.values()])
    def __unicode__(self):
        # Python 2 text representation: the table's verbose name.
        return six.text_type(self._meta.verbose_name)
    def __repr__(self):
        # Debug representation: class name plus the table's Meta name.
        return '<%s: %s>' % (self.__class__.__name__, self._meta.name)
    @property
    def name(self):
        # Read-only access to the slug declared in Meta.
        return self._meta.name
    @property
    def footer(self):
        # Whether the table renders a footer row (Meta.footer).
        return self._meta.footer
    @property
    def multi_select(self):
        # Whether the multi-select checkbox column is rendered.
        return self._meta.multi_select
    @property
    def filtered_data(self):
        # This function should be using django.utils.functional.cached_property
        # decorator, but unfortunately due to bug in Django
        # https://code.djangoproject.com/ticket/19872 it would make it fail
        # when being mocked by mox in tests.
        # Memoized on the instance: computed at most once per table.
        if not hasattr(self, '_filtered_data'):
            self._filtered_data = self.data
            if self._meta.filter and self._meta._filter_action:
                action = self._meta._filter_action
                filter_string = self.get_filter_string()
                filter_field = self.get_filter_field()
                request_method = self.request.method
                # 'needs_preloading' actions filter even on the initial GET
                # with an empty filter string.
                needs_preloading = (not filter_string
                                    and request_method == 'GET'
                                    and action.needs_preloading)
                valid_method = (request_method == action.method)
                # NOTE(review): API filters are presumably applied
                # server-side by the view, hence excluded here — confirm.
                not_api_filter = (filter_string
                                  and not action.is_api_filter(filter_field))
                if valid_method or needs_preloading or not_api_filter:
                    if self._meta.mixed_data_type:
                        self._filtered_data = action.data_type_filter(
                            self, self.data, filter_string)
                    else:
                        self._filtered_data = action.filter(
                            self, self.data, filter_string)
        return self._filtered_data
    def slugify_name(self):
        # Slug form of the table's name (byte string for template use).
        return str(slugify(self._meta.name))
def get_filter_string(self):
"""Get the filter string value. For 'server' type filters this is
saved in the session so that it gets persisted across table loads.
For other filter types this is obtained from the POST dict.
"""
filter_action = self._meta._filter_action
param_name = filter_action.get_param_name()
filter_string = ''
if filter_action.filter_type == 'server':
filter_string = self.request.session.get(param_name, '')
else:
filter_string = self.request.POST.get(param_name, '')
return filter_string
def get_filter_field(self):
"""Get the filter field value used for 'server' type filters. This
is the value from the filter action's list of filter choices.
"""
filter_action = self._meta._filter_action
param_name = '%s_field' % filter_action.get_param_name()
filter_field = self.request.session.get(param_name, '')
return filter_field
def _populate_data_cache(self):
self._data_cache = {}
# Set up hash tables to store data points for each column
for column in self.get_columns():
self._data_cache[column] = {}
def _filter_action(self, action, request, datum=None):
try:
# Catch user errors in permission functions here
row_matched = True
if self._meta.mixed_data_type:
row_matched = action.data_type_matched(datum)
return action._allowed(request, datum) and row_matched
except Exception:
LOG.exception("Error while checking action permissions.")
return None
def is_browser_table(self):
if self._meta.browser_table:
return True
return False
    def render(self):
        """Renders the table using the template from the table options."""
        table_template = template.loader.get_template(self._meta.template)
        # Expose the table under Meta.context_var_name (default: "table").
        extra_context = {self._meta.context_var_name: self,
                         'hidden_title': self._meta.hidden_title}
        context = template.RequestContext(self.request, extra_context)
        return table_template.render(context)
def get_absolute_url(self):
"""Returns the canonical URL for this table.
This is used for the POST action attribute on the form element
wrapping the table. In many cases it is also useful for redirecting
after a successful action on the table.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the table was requested.
"""
return self.request.get_full_path().partition('?')[0]
    def get_full_url(self):
        """Returns the full URL path for this table.
        This is used for the POST action attribute on the form element
        wrapping the table. We use this method to persist the
        pagination marker.
        """
        # Unlike get_absolute_url(), the query string is kept intact.
        return self.request.get_full_path()
    def get_empty_message(self):
        """Returns the message to be displayed when there is no data."""
        # Set from Meta.no_data_message during __init__.
        return self._no_data_message
def get_object_by_id(self, lookup):
"""Returns the data object from the table's dataset which matches
the ``lookup`` parameter specified. An error will be raised if
the match is not a single data object.
We will convert the object id and ``lookup`` to unicode before
comparison.
Uses :meth:`~horizon.tables.DataTable.get_object_id` internally.
"""
if not isinstance(lookup, six.text_type):
lookup = six.text_type(str(lookup), 'utf-8')
matches = []
for datum in self.data:
obj_id = self.get_object_id(datum)
if not isinstance(obj_id, six.text_type):
obj_id = six.text_type(str(obj_id), 'utf-8')
if obj_id == lookup:
matches.append(datum)
if len(matches) > 1:
raise ValueError("Multiple matches were returned for that id: %s."
% matches)
if not matches:
raise exceptions.Http302(self.get_absolute_url(),
_('No match returned for the id "%s".')
% lookup)
return matches[0]
@property
def has_actions(self):
"""Boolean. Indicates whether there are any available actions on this
table.
"""
if not self.base_actions:
return False
return any(self.get_table_actions()) or any(self._meta.row_actions)
@property
def needs_form_wrapper(self):
"""Boolean. Indicates whether this table should be rendered wrapped in
a ``<form>`` tag or not.
"""
# If needs_form_wrapper is explicitly set, defer to that.
if self._needs_form_wrapper is not None:
return self._needs_form_wrapper
# Otherwise calculate whether or not we need a form element.
return self.has_actions
def get_table_actions(self):
"""Returns a list of the action instances for this table."""
button_actions = [self.base_actions[action.name] for action in
self._meta.table_actions if
action not in self._meta.table_actions_menu]
menu_actions = [self.base_actions[action.name] for
action in self._meta.table_actions_menu]
bound_actions = button_actions + menu_actions
return [action for action in bound_actions if
self._filter_action(action, self.request)]
    def get_row_actions(self, datum):
        """Returns a list of the action instances for a specific row."""
        bound_actions = []
        for action in self._meta.row_actions:
            # Copy to allow modifying properties per row; the copy is
            # shallow, so only ``attrs`` (also copied) and ``datum`` may be
            # mutated per row safely.
            bound_action = copy.copy(self.base_actions[action.name])
            bound_action.attrs = copy.copy(bound_action.attrs)
            bound_action.datum = datum
            # Remove disallowed actions.
            if not self._filter_action(bound_action,
                                       self.request,
                                       datum):
                continue
            # Hook for modifying actions based on data. No-op by default.
            bound_action.update(self.request, datum)
            # Pre-create the URL for this link with appropriate parameters
            if issubclass(bound_action.__class__, LinkAction):
                bound_action.bound_url = bound_action.get_link_url(datum)
            bound_actions.append(bound_action)
        return bound_actions
    def set_multiselect_column_visibility(self, visible=True):
        """hide checkbox column if no current table action is allowed."""
        if not self.multi_select:
            return
        # The multi-select checkbox column is assumed to be the first
        # column. NOTE(review): ``.values()[0]`` relies on Python 2 dict
        # views being lists; under Python 3 this needs list(...) — confirm
        # the targeted interpreter.
        select_column = self.columns.values()[0]
        # Try to find if the hidden class need to be
        # removed or added based on visible flag.
        hidden_found = 'hidden' in select_column.classes
        if hidden_found and visible:
            select_column.classes.remove('hidden')
        elif not hidden_found and not visible:
            select_column.classes.append('hidden')
    def render_table_actions(self):
        """Renders the actions specified in ``Meta.table_actions``."""
        template_path = self._meta.table_actions_template
        table_actions_template = template.loader.get_template(template_path)
        bound_actions = self.get_table_actions()
        extra_context = {"table_actions": bound_actions,
                         "table_actions_buttons": [],
                         "table_actions_menu": []}
        # The filter action, when allowed, is rendered separately from
        # the buttons and the menu.
        if self._meta.filter and (
                self._filter_action(self._meta._filter_action, self.request)):
            extra_context["filter"] = self._meta._filter_action
        # Split the remaining actions into push buttons and menu entries.
        for action in bound_actions:
            if action.__class__ in self._meta.table_actions_menu:
                extra_context['table_actions_menu'].append(action)
            elif action != extra_context.get('filter'):
                extra_context['table_actions_buttons'].append(action)
        context = template.RequestContext(self.request, extra_context)
        # Hide the multi-select checkbox column when no action is available.
        self.set_multiselect_column_visibility(len(bound_actions) > 0)
        return table_actions_template.render(context)
    def render_row_actions(self, datum, pull_right=True, row=False):
        """Renders the actions specified in ``Meta.row_actions`` using the
        current row data. If `row` is True, the actions are rendered in a row
        of buttons. Otherwise they are rendered in a dropdown box.
        """
        # Pick the template matching the requested presentation.
        if row:
            template_path = self._meta.row_actions_row_template
        else:
            template_path = self._meta.row_actions_dropdown_template
        row_actions_template = template.loader.get_template(template_path)
        bound_actions = self.get_row_actions(datum)
        extra_context = {"row_actions": bound_actions,
                         "row_id": self.get_object_id(datum),
                         "pull_right": pull_right}
        context = template.RequestContext(self.request, extra_context)
        return row_actions_template.render(context)
@staticmethod
def parse_action(action_string):
"""Parses the ``action`` parameter (a string) sent back with the
POST data. By default this parses a string formatted as
``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns
each of the pieces. The ``row_id`` is optional.
"""
if action_string:
bits = action_string.split(STRING_SEPARATOR)
table = bits[0]
action = bits[1]
try:
object_id = STRING_SEPARATOR.join(bits[2:])
if object_id == '':
object_id = None
except IndexError:
object_id = None
return table, action, object_id
    def take_action(self, action_name, obj_id=None, obj_ids=None):
        """Locates the appropriate action and routes the object
        data to it. The action should return an HTTP redirect
        if successful, or a value which evaluates to ``False``
        if unsuccessful.
        """
        # See if we have a list of ids
        obj_ids = obj_ids or self.request.POST.getlist('object_ids')
        action = self.base_actions.get(action_name, None)
        if not action or action.method != self.request.method:
            # We either didn't get an action or we're being hacked. Goodbye.
            return None
        # Meanwhile, back in Gotham...
        if not action.requires_input or obj_id or obj_ids:
            if obj_id:
                obj_id = self.sanitize_id(obj_id)
            if obj_ids:
                obj_ids = [self.sanitize_id(i) for i in obj_ids]
            # Single handling is easy
            if not action.handles_multiple:
                response = action.single(self, self.request, obj_id)
            # Otherwise figure out what to pass along
            else:
                # Preference given to a specific id, since that implies
                # the user selected an action for just one row.
                if obj_id:
                    obj_ids = [obj_id]
                response = action.multiple(self, self.request, obj_ids)
            return response
        elif action and action.requires_input and not (obj_id or obj_ids):
            # The action needs a selection but none was made; nudge the
            # user and fall through to returning None (no redirect).
            messages.info(self.request,
                          _("Please select a row before taking that action."))
        return None
@classmethod
def check_handler(cls, request):
"""Determine whether the request should be handled by this table."""
if request.method == "POST" and "action" in request.POST:
table, action, obj_id = cls.parse_action(request.POST["action"])
elif "table" in request.GET and "action" in request.GET:
table = request.GET["table"]
action = request.GET["action"]
obj_id = request.GET.get("obj_id", None)
else:
table = action = obj_id = None
return table, action, obj_id
    def maybe_preempt(self):
        """Determine whether the request should be handled by a preemptive
        action on this table or by an AJAX row update before loading any data.
        """
        request = self.request
        table_name, action_name, obj_id = self.check_handler(request)
        if table_name == self.name:
            # Handle AJAX row updating.
            new_row = self._meta.row_class(self)
            if new_row.ajax and new_row.ajax_action_name == action_name:
                try:
                    datum = new_row.get_data(request, obj_id)
                    if self.get_object_id(datum) == self.current_item_id:
                        self.selected = True
                        new_row.classes.append('current_selected')
                    new_row.load_cells(datum)
                    error = False
                except Exception:
                    datum = None
                    error = exceptions.handle(request, ignore=True)
                if request.is_ajax():
                    if not error:
                        return HttpResponse(new_row.render())
                    else:
                        return HttpResponse(status=error.status_code)
            elif new_row.ajax_cell_action_name == action_name:
                # inline edit of the cell actions
                return self.inline_edit_handle(request, table_name,
                                               action_name, obj_id,
                                               new_row)
        # Run any matching preemptive action before table data is loaded;
        # a truthy return value (e.g. a redirect) short-circuits the view.
        preemptive_actions = [action for action in
                              self.base_actions.values() if action.preempt]
        if action_name:
            for action in preemptive_actions:
                if action.name == action_name:
                    handled = self.take_action(action_name, obj_id)
                    if handled:
                        return handled
        return None
    def inline_edit_handle(self, request, table_name, action_name, obj_id,
                           new_row):
        """Inline edit handler.
        Showing form or handling update by POST of the cell.
        """
        try:
            cell_name = request.GET['cell_name']
            datum = new_row.get_data(request, obj_id)
            # TODO(lsmola) extract load cell logic to Cell and load
            # only 1 cell. This is kind of ugly.
            if request.GET.get('inline_edit_mod') == "true":
                new_row.table.columns[cell_name].auto = "form_field"
                inline_edit_mod = True
            else:
                inline_edit_mod = False
            # Load the cell and set the inline_edit_mod.
            new_row.load_cells(datum)
            cell = new_row.cells[cell_name]
            cell.inline_edit_mod = inline_edit_mod
            # If not allowed, neither edit mod or updating is allowed.
            if not cell.update_allowed:
                datum_display = (self.get_object_display(datum) or
                                 _("N/A"))
                LOG.info('Permission denied to %s: "%s"' %
                         ("Update Action", datum_display))
                return HttpResponse(status=401)
            # If it is post request, we are updating the cell.
            if request.method == "POST":
                return self.inline_update_action(request,
                                                 datum,
                                                 cell,
                                                 obj_id,
                                                 cell_name)
            error = False
        except Exception:
            datum = None
            error = exceptions.handle(request, ignore=True)
        if request.is_ajax():
            if not error:
                # NOTE(review): if the try block raised before ``cell`` was
                # bound and handle() returned a falsy error, this line would
                # raise NameError — confirm handle() always returns a truthy
                # value on failure.
                return HttpResponse(cell.render())
            else:
                return HttpResponse(status=error.status_code)
    def inline_update_action(self, request, datum, cell, obj_id, cell_name):
        """Handling update by POST of the cell.
        """
        # The posted field name is "<cell_name>__<obj_id>".
        new_cell_value = request.POST.get(
            cell_name + '__' + obj_id, None)
        if issubclass(cell.column.form_field.__class__,
                      forms.Field):
            try:
                # using Django Form Field to parse the
                # right value from POST and to validate it
                new_cell_value = (
                    cell.column.form_field.clean(
                        new_cell_value))
                cell.update_action.action(
                    self.request, datum, obj_id, cell_name, new_cell_value)
                response = {
                    'status': 'updated',
                    'message': ''
                }
                return HttpResponse(
                    json.dumps(response),
                    status=200,
                    content_type="application/json")
            except core_exceptions.ValidationError:
                # if there is a validation error, I will
                # return the message to the client
                exc_type, exc_value, exc_traceback = (
                    sys.exc_info())
                response = {
                    'status': 'validation_error',
                    'message': ' '.join(exc_value.messages)}
                return HttpResponse(
                    json.dumps(response),
                    status=400,
                    content_type="application/json")
        # NOTE(review): when the column has no Django form field this
        # method falls through and returns None — confirm callers
        # tolerate a None response here.
def maybe_handle(self):
"""Determine whether the request should be handled by any action on
this table after data has been loaded.
"""
request = self.request
table_name, action_name, obj_id = self.check_handler(request)
if table_name == self.name and action_name:
action_names = [action.name for action in
self.base_actions.values() if not action.preempt]
# do not run preemptive actions here
if action_name in action_names:
return self.take_action(action_name, obj_id)
return None
    def sanitize_id(self, obj_id):
        """Override to modify an incoming obj_id to match existing
        API data types or modify the format.
        """
        # Identity by default; subclasses may cast to int, strip
        # prefixes, etc.
        return obj_id
    def get_object_id(self, datum):
        """Returns the identifier for the object this row will represent.
        By default this returns an ``id`` attribute on the given object,
        but this can be overridden to return other values.
        .. warning::
            Make sure that the value returned is a unique value for the id
            otherwise rendering issues can occur.
        """
        # An AttributeError here usually means the datum type needs a
        # custom override of this method.
        return datum.id
def get_object_display(self, datum):
"""Returns a display name that identifies this object.
By default, this returns a ``name`` attribute from the given object,
but this can be overridden to return other values.
"""
if hasattr(datum, 'name'):
return datum.name
return None
    def has_prev_data(self):
        """Returns a boolean value indicating whether there is previous data
        available to this table from the source (generally an API).
        The method is largely meant for internal use, but if you want to
        override it to provide custom behavior you can do so at your own risk.
        """
        # Backed directly by Meta.has_prev_data.
        return self._meta.has_prev_data
    def has_more_data(self):
        """Returns a boolean value indicating whether there is more data
        available to this table from the source (generally an API).
        The method is largely meant for internal use, but if you want to
        override it to provide custom behavior you can do so at your own risk.
        """
        # Backed directly by Meta.has_more_data.
        return self._meta.has_more_data
    def get_prev_marker(self):
        """Returns the identifier for the first object in the current data set
        for APIs that use marker/limit-based paging.
        """
        # URL-quote the id so it is safe to embed in a query string;
        # empty string when the table has no data.
        return http.urlquote_plus(self.get_object_id(self.data[0])) \
            if self.data else ''
    def get_marker(self):
        """Returns the identifier for the last object in the current data set
        for APIs that use marker/limit-based paging.
        """
        # URL-quote the id so it is safe to embed in a query string;
        # empty string when the table has no data.
        return http.urlquote_plus(self.get_object_id(self.data[-1])) \
            if self.data else ''
def get_prev_pagination_string(self):
"""Returns the query parameter string to paginate this table
to the previous page.
"""
return "=".join([self._meta.prev_pagination_param,
self.get_prev_marker()])
def get_pagination_string(self):
"""Returns the query parameter string to paginate this table
to the next page.
"""
return "=".join([self._meta.pagination_param, self.get_marker()])
def calculate_row_status(self, statuses):
"""Returns a boolean value determining the overall row status
based on the dictionary of column name to status mappings passed in.
By default, it uses the following logic:
#. If any statuses are ``False``, return ``False``.
#. If no statuses are ``False`` but any or ``None``, return ``None``.
#. If all statuses are ``True``, return ``True``.
This provides the greatest protection against false positives without
weighting any particular columns.
The ``statuses`` parameter is passed in as a dictionary mapping
column names to their statuses in order to allow this function to
be overridden in such a way as to weight one column's status over
another should that behavior be desired.
"""
values = statuses.values()
if any([status is False for status in values]):
return False
elif any([status is None for status in values]):
return None
else:
return True
def get_row_status_class(self, status):
"""Returns a css class name determined by the status value. This class
name is used to indicate the status of the rows in the table if
any ``status_columns`` have been specified.
"""
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
    def get_columns(self):
        """Returns this table's columns including auto-generated ones."""
        # NOTE(review): returns whatever ``self.columns.values()`` yields
        # (a list on Python 2 ordered-dict implementations) — confirm the
        # ordering guarantee callers rely on.
        return self.columns.values()
def get_rows(self):
"""Return the row data for this table broken out by columns."""
rows = []
try:
for datum in self.filtered_data:
row = self._meta.row_class(self, datum)
if self.get_object_id(datum) == self.current_item_id:
self.selected = True
row.classes.append('current_selected')
rows.append(row)
except Exception:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise six.reraise(template.TemplateSyntaxError, exc_info[1],
exc_info[2])
return rows
    def css_classes(self):
        """Returns the additional CSS class to be added to <table> tag."""
        # Value comes straight from Meta.css_classes.
        return self._meta.css_classes
| apache-2.0 |
cloudwatt/contrail-neutron-plugin | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/virtual_ip.py | 3 | 9900 | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron.extensions import loadbalancer
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from vnc_api.vnc_api import IdPermsType, NoIdError
from vnc_api.vnc_api import InstanceIp, VirtualMachineInterface
from vnc_api.vnc_api import SecurityGroup
from vnc_api.vnc_api import VirtualIp, VirtualIpType
from resource_manager import ResourceManager
import utils
LOG = logging.getLogger(__name__)
class VirtualIpManager(ResourceManager):
    """CRUD manager translating neutron LBaaS virtual-IP (VIP) operations
    into Contrail ``virtual-ip`` API objects via the vnc_api client.
    """

    # VirtualIpType property name -> neutron VIP attribute name.
    _virtual_ip_type_mapping = {
        'address': 'address',
        'protocol': 'protocol',
        'protocol_port': 'protocol_port',
        'connection_limit': 'connection_limit',
        'subnet_id': 'subnet_id',
        'admin_state': 'admin_state_up',
    }

    @property
    def property_type_mapping(self):
        return self._virtual_ip_type_mapping

    def make_properties(self, vip):
        """Build a VirtualIpType from a neutron VIP dict."""
        props = VirtualIpType()
        for key, mapping in self._virtual_ip_type_mapping.iteritems():
            if mapping in vip:
                setattr(props, key, vip[mapping])
        # session_persistence is expected to be present (possibly None)
        # in the request body.
        sp = vip['session_persistence']
        if sp is not None:
            props.persistence_type = sp['type']
            if 'cookie_name' in sp:
                props.persistence_cookie_name = sp['cookie_name']
        return props

    def _get_vip_pool_id(self, vip):
        """Return the uuid of the pool the VIP references, or None."""
        pool_refs = vip.get_loadbalancer_pool_refs()
        if pool_refs is None:
            return None
        return pool_refs[0]['uuid']

    def _get_interface_params(self, vip, props):
        """Return the VIP's port uuid and backfill ``props.address`` from
        the port's instance-ip when the address is unset.
        """
        vmi_list = vip.get_virtual_machine_interface_refs()
        if vmi_list is None:
            return None
        port_id = vmi_list[0]['uuid']
        if not props.address or props.address == attributes.ATTR_NOT_SPECIFIED:
            try:
                vmi = self._api.virtual_machine_interface_read(id=port_id)
            except NoIdError as ex:
                LOG.error(ex)
                return None
            ip_refs = vmi.get_instance_ip_back_refs()
            if ip_refs:
                try:
                    iip = self._api.instance_ip_read(ip_refs[0]['uuid'])
                except NoIdError as ex:
                    LOG.error(ex)
                    return None
                props.address = iip.get_instance_ip_address()
        return port_id

    def make_dict(self, vip, fields=None):
        """Render a Contrail VirtualIp object as a neutron VIP dict."""
        props = vip.get_virtual_ip_properties()
        port_id = self._get_interface_params(vip, props)
        res = {'id': vip.uuid,
               'tenant_id': vip.parent_uuid,
               'name': vip.display_name,
               'description': self._get_object_description(vip),
               'subnet_id': props.subnet_id,
               'address': props.address,
               'port_id': port_id,
               'protocol_port': props.protocol_port,
               'protocol': props.protocol,
               'pool_id': self._get_vip_pool_id(vip),
               'session_persistence': None,
               'connection_limit': props.connection_limit,
               'admin_state_up': props.admin_state,
               'status': self._get_object_status(vip)}
        if props.persistence_type:
            sp = {'type': props.persistence_type}
            if props.persistence_type == 'APP_COOKIE':
                sp['cookie_name'] = props.persistence_cookie_name
            res['session_persistence'] = sp
        return self._fields(res, fields)

    def resource_read(self, id):
        return self._api.virtual_ip_read(id=id)

    def resource_list(self, tenant_id=None):
        return self._api.virtual_ips_list(parent_id=tenant_id)

    def resource_update(self, obj):
        return self._api.virtual_ip_update(obj)

    def resource_delete(self, id):
        return self._api.virtual_ip_delete(id=id)

    def get_exception_notfound(self, id=None):
        return loadbalancer.VipNotFound(vip_id=id)

    @property
    def neutron_name(self):
        return "vip"

    @property
    def resource_name_plural(self):
        return "virtual-ips"

    def _create_virtual_interface(self, project, vip_id, subnet_id,
                                  ip_address):
        """Create the port (VMI) and instance-ip backing a new VIP."""
        network_id = utils.get_subnet_network_id(self._api, subnet_id)
        try:
            vnet = self._api.virtual_network_read(id=network_id)
        except NoIdError:
            raise n_exc.NetworkNotFound(net_id=network_id)

        vmi = VirtualMachineInterface(vip_id, project)
        vmi.set_virtual_network(vnet)
        sg_obj = SecurityGroup("default", project)
        vmi.add_security_group(sg_obj)
        self._api.virtual_machine_interface_create(vmi)

        # NOTE(review): fq_name here is the project fq_name plus the vip
        # uuid — confirm InstanceIp accepts a multi-element fq_name.
        fq_name = list(project.get_fq_name())
        fq_name.append(vip_id)
        iip_obj = InstanceIp(fq_name=fq_name)
        iip_obj.set_virtual_network(vnet)
        iip_obj.set_virtual_machine_interface(vmi)
        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
            iip_obj.set_instance_ip_address(ip_address)
        # BUG FIX: the InstanceIp object itself must be passed to the API;
        # the fq_name list was being passed before.
        self._api.instance_ip_create(iip_obj)
        return vmi

    def _delete_virtual_interface(self, vmi_list):
        """Delete the VMIs (and their instance-ips) backing a VIP."""
        if vmi_list is None:
            return

        for vmi_ref in vmi_list:
            interface_id = vmi_ref['uuid']
            try:
                vmi = self._api.virtual_machine_interface_read(id=interface_id)
            except NoIdError as ex:
                LOG.error(ex)
                continue

            ip_refs = vmi.get_instance_ip_back_refs()
            if ip_refs:
                for ref in ip_refs:
                    self._api.instance_ip_delete(id=ref['uuid'])
            self._api.virtual_machine_interface_delete(id=interface_id)

    def create(self, context, vip):
        """
        Create a VIP.
        """
        v = vip['vip']
        tenant_id = self._get_tenant_id_for_create(context, v)
        project = self._project_read(project_id=tenant_id)

        if v['pool_id']:
            try:
                pool = self._api.loadbalancer_pool_read(id=v['pool_id'])
            except NoIdError:
                raise loadbalancer.PoolNotFound(pool_id=v['pool_id'])
            # The pool must belong to the same tenant as the VIP.
            project_id = pool.parent_uuid
            if tenant_id != project_id:
                raise n_exc.NotAuthorized()
            # TODO: check that the pool has no vip configured
            # if pool.protocol != v['protocol']:
            #     raise loadbalancer.ProtocolMismatch(
            #         vip_proto=v['protocol'], pool_proto=pool.protocol)
        else:
            pool = None

        uuid = uuidutils.generate_uuid()
        name = self._get_resource_name('virtual-ip', project, v['name'], uuid)
        props = self.make_properties(v)
        id_perms = IdPermsType(uuid=uuid, enable=True,
                               description=v['description'])
        vip = VirtualIp(name, project, virtual_ip_properties=props,
                        id_perms=id_perms, display_name=v['name'])
        vip.uuid = uuid

        if pool:
            vip.set_loadbalancer_pool(pool)

        vmi = self._create_virtual_interface(project, uuid, v['subnet_id'],
                                             v.get('address'))
        vip.set_virtual_machine_interface(vmi)

        self._api.virtual_ip_create(vip)
        return self.make_dict(vip)

    def delete(self, context, id):
        try:
            vip = self._api.virtual_ip_read(id=id)
        except NoIdError:
            # BUG FIX: the exception was constructed but never raised,
            # which led to an UnboundLocalError on ``vip`` below.
            raise loadbalancer.VipNotFound(vip_id=id)

        self._delete_virtual_interface(
            vip.get_virtual_machine_interface_refs())
        super(VirtualIpManager, self).delete(context, id)

    def _update_virtual_ip_properties(self, props, id, vip):
        """
        Update virtual ip properties and return True if they have been
        modified
        """
        # according to the spec:
        # status, subnet_id, address, port and protocol are immutable
        immutable = ['address', 'protocol', 'protocol_port', 'subnet_id']
        for field in immutable:
            if field not in vip:
                continue
            if getattr(props, field) != vip[field]:
                msg = 'Attribute %s in vip %s is immutable' % (field, id)
                raise n_exc.BadRequest(resource='vip', msg=msg)

        # update
        change = self.update_properties_subr(props, vip)
        if 'session_persistence' in vip:
            sp = vip['session_persistence']
            if props.persistence_type != sp['type']:
                props.persistence_type = sp['type']
                change = True
            if 'cookie_name' in sp and \
                    props.persistence_cookie_name != sp['cookie_name']:
                # BUG FIX: this was a no-op comparison (!=) instead of an
                # assignment, so cookie-name updates were never persisted.
                props.persistence_cookie_name = sp['cookie_name']
                change = True
        return change

    def update_properties(self, vip_db, id, v):
        props = vip_db.get_virtual_ip_properties()
        if self._update_virtual_ip_properties(props, id, v):
            vip_db.set_virtual_ip_properties(props)
            return True
        return False

    def update_object(self, vip_db, id, v):
        if 'pool_id' in v and self._get_vip_pool_id(vip_db) != v['pool_id']:
            try:
                pool = self._api.loadbalancer_pool_read(id=v['pool_id'])
            except NoIdError:
                raise loadbalancer.PoolNotFound(pool_id=v['pool_id'])
            if vip_db.parent_uuid != pool.parent_uuid:
                raise n_exc.NotAuthorized()

            # TODO: check that the pool has no vip configured
            # TODO: check that the protocol matches
            # TODO: check that the pool is in valid state
            # TODO: check that the provider is the same.
            # BUG FIX: the method name was misspelled
            # ``set_localbalancer_pool`` (AttributeError at runtime).
            vip_db.set_loadbalancer_pool(pool)
            return True
        return False
| apache-2.0 |
ReamerKim/pad_crawling | util/beautifulsoup4-4.3.2/beautifulsoup4-4.3.2/build/lib/bs4/tests/test_docs.py | 607 | 1067 | "Test harness for doctests."
# pylint: disable-msg=E0611,W0142
__metaclass__ = type
__all__ = [
'additional_tests',
]
import atexit
import doctest
import os
#from pkg_resources import (
# resource_filename, resource_exists, resource_listdir, cleanup_resources)
import unittest
# Option flags applied to every doctest run by this harness.
DOCTEST_FLAGS = (doctest.ELLIPSIS
                 | doctest.NORMALIZE_WHITESPACE
                 | doctest.REPORT_NDIFF)
# def additional_tests():
# "Run the doc tests (README.txt and docs/*, if any exist)"
# doctest_files = [
# os.path.abspath(resource_filename('bs4', 'README.txt'))]
# if resource_exists('bs4', 'docs'):
# for name in resource_listdir('bs4', 'docs'):
# if name.endswith('.txt'):
# doctest_files.append(
# os.path.abspath(
# resource_filename('bs4', 'docs/%s' % name)))
# kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
# atexit.register(cleanup_resources)
# return unittest.TestSuite((
# doctest.DocFileSuite(*doctest_files, **kwargs)))
| gpl-2.0 |
kongji2008/genetify | pygooglechart/examples/labels.py | 2 | 1720 | #!/usr/bin/env python
import os
import sys
import math
import random
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(ROOT, '..'))
from pygooglechart import SimpleLineChart
from pygooglechart import Axis
import settings
import helper
def cat_proximity():
    """Cat proximity graph from http://xkcd.com/231/"""
    chart = SimpleLineChart(int(settings.width * 1.5), settings.height)
    chart.set_legend(['INTELLIGENCE', 'INSANITY OF STATEMENTS'])
    # Intelligence drops off sharply as the cat gets closer...
    chart.add_data([100. / y for y in xrange(1, 15)])
    # ...while the insanity of statements rises correspondingly.
    chart.add_data([100. - 100 / y for y in xrange(1, 15)])
    # Green for intelligence, blue for insanity.
    chart.set_colours(['208020', '202080'])
    # "FAR"/"NEAR" end labels are placed automatically at either end.
    chart.set_axis_labels(Axis.BOTTOM, ['FAR', 'NEAR'])
    # Axis title, aligned to the center of the bottom axis.
    title_index = chart.set_axis_labels(Axis.BOTTOM,
                                        ['HUMAN PROXIMITY TO CAT'])
    chart.set_axis_style(title_index, '202020', font_size=10, alignment=0)
    chart.set_axis_positions(title_index, [50])
    chart.download('label-cat-proximity.png')
def many_labels():
    """Render a chart with several randomly-styled axes on each side."""
    chart = SimpleLineChart(settings.width, settings.height)
    for _ in xrange(3):
        for side in (Axis.LEFT, Axis.RIGHT, Axis.BOTTOM):
            axis_index = chart.set_axis_range(side, 0,
                                              random.random() * 100)
            chart.set_axis_style(axis_index,
                                 colour=helper.random_colour(),
                                 font_size=random.random() * 10 + 5)
        chart.add_data(helper.random_data())
    chart.download('label-many.png')
def main():
    # Render every example chart defined in this module.
    cat_proximity()
    many_labels()
if __name__ == '__main__':
    main()
| mit |
indictranstech/focal-erpnext | erpnext/stock/stock_ledger.py | 15 | 13389 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, flt, cstr, now
from erpnext.stock.utils import get_valuation_method
import json
# future reposting
# Raised when a transaction would drive stock negative and negative
# stock is not allowed.
class NegativeStockError(frappe.ValidationError): pass

# NOTE(review): frappe.local(...) appears to return a per-request proxy
# bound to this key — confirm; populated by validate_negative_stock().
_exceptions = frappe.local('stockledger_exceptions')
# _exceptions = []
def make_sl_entries(sl_entries, is_amended=None):
    # Create Stock Ledger Entries (and update bins) for a list of entry
    # dicts. A cancellation voucher flips quantities, flags the old rows
    # as cancelled up front and deletes them afterwards.
    if sl_entries:
        from erpnext.stock.utils import update_bin
        cancel = True if sl_entries[0].get("is_cancelled") == "Yes" else False
        if cancel:
            # NOTE(review): set_as_cancel is declared (voucher_type,
            # voucher_no) but called here with (voucher_no, voucher_type);
            # its SQL parameter order compensates — see that function.
            set_as_cancel(sl_entries[0].get('voucher_no'), sl_entries[0].get('voucher_type'))
        for sle in sl_entries:
            sle_id = None
            if sle.get('is_cancelled') == 'Yes':
                sle['actual_qty'] = -flt(sle['actual_qty'])
            # Stock Reconciliation rows are recorded even with zero qty.
            if sle.get("actual_qty") or sle.get("voucher_type")=="Stock Reconciliation":
                sle_id = make_entry(sle)
            args = sle.copy()
            args.update({
                "sle_id": sle_id,
                "is_amended": is_amended
            })
            update_bin(args)
        if cancel:
            delete_cancelled_entry(sl_entries[0].get('voucher_type'), sl_entries[0].get('voucher_no'))
def set_as_cancel(voucher_type, voucher_no):
    # Mark all ledger entries of a voucher as cancelled.
    # NOTE(review): the only caller passes (voucher_no, voucher_type), so
    # the parameter names here are effectively swapped; the SQL parameter
    # order below compensates (``voucher_no=%s`` receives the local
    # ``voucher_type``, which actually holds the voucher number). Behavior
    # is correct, but the naming deserves a cleanup — confirm before
    # renaming, since callers bind positionally.
    frappe.db.sql("""update `tabStock Ledger Entry` set is_cancelled='Yes',
        modified=%s, modified_by=%s
        where voucher_no=%s and voucher_type=%s""",
        (now(), frappe.session.user, voucher_type, voucher_no))
def make_entry(args):
    """Insert and submit one Stock Ledger Entry document built from
    ``args`` (mutated in place to carry the doctype) and return its name.
    """
    args.update({"doctype": "Stock Ledger Entry"})
    entry = frappe.get_doc(args)
    # Ledger entries are system-generated; skip permission checks.
    entry.ignore_permissions = 1
    entry.insert()
    entry.submit()
    return entry.name
def delete_cancelled_entry(voucher_type, voucher_no):
    # Remove all ledger rows belonging to a cancelled voucher.
    frappe.db.sql("""delete from `tabStock Ledger Entry`
        where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
def update_entries_after(args, allow_zero_rate=False, verbose=1):
    """
    update valuation rate and qty after transaction
    from the current time-bucket onwards
    args = {
        "item_code": "ABC",
        "warehouse": "XYZ",
        "posting_date": "2012-12-12",
        "posting_time": "12:00"
    }
    """
    if not _exceptions:
        frappe.local.stockledger_exceptions = []
    # Start from the last SLE strictly before this posting datetime and
    # replay every later entry, recomputing the running qty and value.
    previous_sle = get_sle_before_datetime(args)
    qty_after_transaction = flt(previous_sle.get("qty_after_transaction"))
    valuation_rate = flt(previous_sle.get("valuation_rate"))
    stock_queue = json.loads(previous_sle.get("stock_queue") or "[]")
    stock_value = flt(previous_sle.get("stock_value"))
    prev_stock_value = flt(previous_sle.get("stock_value"))
    entries_to_fix = get_sle_after_datetime(previous_sle or \
        {"item_code": args["item_code"], "warehouse": args["warehouse"]}, for_update=True)
    valuation_method = get_valuation_method(args["item_code"])
    stock_value_difference = 0.0
    for sle in entries_to_fix:
        if sle.serial_no or not cint(frappe.db.get_default("allow_negative_stock")):
            # validate negative stock for serialized items, fifo valuation
            # or when negative stock is not allowed for moving average
            if not validate_negative_stock(qty_after_transaction, sle):
                qty_after_transaction += flt(sle.actual_qty)
                continue
        if sle.serial_no:
            valuation_rate = get_serialized_values(qty_after_transaction, sle, valuation_rate)
            qty_after_transaction += flt(sle.actual_qty)
        else:
            if sle.voucher_type=="Stock Reconciliation":
                # A reconciliation resets both rate and quantity outright.
                valuation_rate = sle.valuation_rate
                qty_after_transaction = sle.qty_after_transaction
                stock_queue = [[qty_after_transaction, valuation_rate]]
            else:
                if valuation_method == "Moving Average":
                    valuation_rate = get_moving_average_values(qty_after_transaction, sle, valuation_rate, allow_zero_rate)
                else:
                    valuation_rate = get_fifo_values(qty_after_transaction, sle, stock_queue, allow_zero_rate)
                qty_after_transaction += flt(sle.actual_qty)
        # get stock value
        if sle.serial_no:
            stock_value = qty_after_transaction * valuation_rate
        elif valuation_method == "Moving Average":
            stock_value = qty_after_transaction * valuation_rate
        else:
            # FIFO: value is the sum over the remaining [qty, rate] batches.
            stock_value = sum((flt(batch[0]) * flt(batch[1]) for batch in stock_queue))
        # rounding as per precision
        from frappe.model.meta import get_field_precision
        meta = frappe.get_meta("Stock Ledger Entry")
        stock_value = flt(stock_value, get_field_precision(meta.get_field("stock_value"),
            frappe._dict({"fields": sle})))
        stock_value_difference = stock_value - prev_stock_value
        prev_stock_value = stock_value
        # update current sle
        frappe.db.sql("""update `tabStock Ledger Entry`
            set qty_after_transaction=%s, valuation_rate=%s, stock_queue=%s,
            stock_value=%s, stock_value_difference=%s where name=%s""",
            (qty_after_transaction, valuation_rate,
            json.dumps(stock_queue), stock_value, stock_value_difference, sle.name))
    # Raise accumulated negative-stock violations at the end (or just
    # warn, depending on ``verbose``); handled by _raise_exceptions.
    if _exceptions:
        _raise_exceptions(args, verbose)
    # update bin
    if not frappe.db.exists({"doctype": "Bin", "item_code": args["item_code"],
        "warehouse": args["warehouse"]}):
        bin_wrapper = frappe.get_doc({
            "doctype": "Bin",
            "item_code": args["item_code"],
            "warehouse": args["warehouse"],
        })
        bin_wrapper.ignore_permissions = 1
        bin_wrapper.insert()
    frappe.db.sql("""update `tabBin` set valuation_rate=%s, actual_qty=%s,
        stock_value=%s,
        projected_qty = (actual_qty + indented_qty + ordered_qty + planned_qty - reserved_qty)
        where item_code=%s and warehouse=%s""", (valuation_rate, qty_after_transaction,
        stock_value, args["item_code"], args["warehouse"]))
def get_sle_before_datetime(args, for_update=False):
    """Return the last stock ledger entry posted strictly before the
    datetime in ``args``, or an empty frappe._dict when there is none.

    Reposting starts from this entry so that every row in the current
    time-bucket — including rows interleaved with cancelled entries —
    gets recalculated.
    """
    rows = get_stock_ledger_entries(args,
        ["timestamp(posting_date, posting_time) < timestamp(%(posting_date)s, %(posting_time)s)"],
        "desc", "limit 1", for_update=for_update)
    return rows[0] if rows else frappe._dict()
def get_sle_after_datetime(args, for_update=False):
    """Return Stock Ledger Entries posted after the datetime in ``args``,
    ordered ascending, for reposting.
    """
    conditions = ["timestamp(posting_date, posting_time) > "
        "timestamp(%(posting_date)s, %(posting_time)s)"]
    # Exclude the triggering entry by name: works around MariaDB's
    # floating-microsecond timestamp() comparison.
    if args.get("name"):
        conditions.append("name!=%(name)s")
    return get_stock_ledger_entries(args, conditions, "asc",
        for_update=for_update)
def get_stock_ledger_entries(args, conditions=None, order="desc", limit=None, for_update=False):
    """get stock ledger entries filtered by specific posting datetime conditions

    NOTE: mutates the caller's ``args`` dict, filling in default
    posting_date/posting_time when absent.

    ``conditions`` fragments are interpolated raw into the SQL string (the
    ``%%(...)s`` placeholders survive the outer %-formatting and are then
    bound as parameters) — callers must only pass trusted fragments.
    """
    if not args.get("posting_date"):
        # far-past default so the datetime comparison matches everything
        args["posting_date"] = "1900-01-01"
    if not args.get("posting_time"):
        args["posting_time"] = "00:00"
    return frappe.db.sql("""select *, timestamp(posting_date, posting_time) as "timestamp" from `tabStock Ledger Entry`
        where item_code = %%(item_code)s
        and warehouse = %%(warehouse)s
        and ifnull(is_cancelled, 'No')='No'
        %(conditions)s
        order by timestamp(posting_date, posting_time) %(order)s, name %(order)s
        %(limit)s %(for_update)s""" % {
            "conditions": conditions and ("and " + " and ".join(conditions)) or "",
            "limit": limit or "",
            "for_update": for_update and "for update" or "",
            "order": order
        }, args, as_dict=1)
def validate_negative_stock(qty_after_transaction, sle):
    """
    validate negative stock for entries current datetime onwards
    will not consider cancelled entries

    Returns False (and records the offending entry in the module-level
    ``_exceptions`` list) when applying ``sle`` would drive stock negative;
    True otherwise.
    """
    diff = qty_after_transaction + flt(sle.actual_qty)
    # NOTE(review): resetting frappe.local.stockledger_exceptions here is
    # presumably what (re)initializes the module-level `_exceptions` view —
    # confirm how `_exceptions` is bound at module scope.
    if not _exceptions:
        frappe.local.stockledger_exceptions = []
    # 0.0001 tolerance absorbs floating-point rounding in qty arithmetic
    if diff < 0 and abs(diff) > 0.0001:
        # negative stock!
        exc = sle.copy().update({"diff": diff})
        _exceptions.append(exc)
        return False
    else:
        return True
def get_serialized_values(qty_after_transaction, sle, valuation_rate):
    """Return the new valuation rate for a serialized item after applying
    ``sle``, deriving the incoming rate from the entry's serial numbers'
    average purchase rate when needed."""
    incoming_rate = flt(sle.incoming_rate)
    actual_qty = flt(sle.actual_qty)
    serial_no = cstr(sle.serial_no).split("\n")
    if incoming_rate < 0:
        # wrong incoming rate
        incoming_rate = valuation_rate
    elif incoming_rate == 0 or flt(sle.actual_qty) < 0:
        # In case of delivery/stock issue, get average purchase rate
        # of serial nos of current entry
        # (IN-clause placeholders are generated to match len(serial_no))
        incoming_rate = flt(frappe.db.sql("""select avg(ifnull(purchase_rate, 0))
            from `tabSerial No` where name in (%s)""" % (", ".join(["%s"]*len(serial_no))),
            tuple(serial_no))[0][0])
    if incoming_rate and not valuation_rate:
        valuation_rate = incoming_rate
    else:
        new_stock_qty = qty_after_transaction + actual_qty
        if new_stock_qty > 0:
            new_stock_value = qty_after_transaction * valuation_rate + actual_qty * incoming_rate
            if new_stock_value > 0:
                # calculate new valuation rate only if stock value is positive
                # else it remains the same as that of previous entry
                valuation_rate = new_stock_value / new_stock_qty
    return valuation_rate
def get_moving_average_values(qty_after_transaction, sle, valuation_rate, allow_zero_rate):
    """Return the moving-average valuation rate after applying ``sle``.

    For an incoming entry (positive actual_qty), blends the existing stock
    value with the incoming value; for an outgoing entry the rate is only
    recomputed when it is missing and stock is non-positive.

    :param qty_after_transaction: stock qty after this entry is applied
    :param sle: the Stock Ledger Entry being applied
    :param valuation_rate: valuation rate before this entry
    :param allow_zero_rate: passed through to get_valuation_rate's fallback
    :returns: non-negative float valuation rate
    """
    incoming_rate = flt(sle.incoming_rate)
    actual_qty = flt(sle.actual_qty)
    # use the precomputed local instead of re-evaluating flt(sle.actual_qty)
    if actual_qty > 0:
        if qty_after_transaction < 0 and not valuation_rate:
            # if negative stock, take current valuation rate as incoming rate
            valuation_rate = incoming_rate
        new_stock_qty = abs(qty_after_transaction) + actual_qty
        new_stock_value = (abs(qty_after_transaction) * valuation_rate) + (actual_qty * incoming_rate)
        if new_stock_qty:
            valuation_rate = new_stock_value / flt(new_stock_qty)
    elif not valuation_rate and qty_after_transaction <= 0:
        # no rate known and stock is exhausted: fall back to the last known
        # rate (or buying price list) for this item/warehouse
        valuation_rate = get_valuation_rate(sle.item_code, sle.warehouse, allow_zero_rate)
    return abs(flt(valuation_rate))
def get_fifo_values(qty_after_transaction, sle, stock_queue, allow_zero_rate):
    """Apply ``sle`` to the FIFO ``stock_queue`` (mutated in place; a list of
    [qty, rate] batches, oldest first) and return the resulting non-negative
    valuation rate (stock value / stock qty over the remaining batches)."""
    incoming_rate = flt(sle.incoming_rate)
    actual_qty = flt(sle.actual_qty)
    if actual_qty > 0:
        # incoming stock: append a new batch, or merge into a trailing
        # non-positive (negative-stock) batch
        if not stock_queue:
            stock_queue.append([0, 0])
        if stock_queue[-1][0] > 0:
            stock_queue.append([actual_qty, incoming_rate])
        else:
            qty = stock_queue[-1][0] + actual_qty
            if qty == 0:
                stock_queue.pop(-1)
            else:
                stock_queue[-1] = [qty, incoming_rate]
    else:
        # outgoing stock: consume batches from the head of the queue
        qty_to_pop = abs(actual_qty)
        while qty_to_pop:
            if not stock_queue:
                # queue exhausted: seed a zero batch, rated from the last
                # known valuation only when overall stock is non-positive
                stock_queue.append([0, get_valuation_rate(sle.item_code, sle.warehouse, allow_zero_rate)
                    if qty_after_transaction <= 0 else 0])
            batch = stock_queue[0]
            if qty_to_pop >= batch[0]:
                # consume current batch
                qty_to_pop = qty_to_pop - batch[0]
                stock_queue.pop(0)
                if not stock_queue and qty_to_pop:
                    # stock finished, qty still remains to be withdrawn
                    # negative stock, keep in as a negative batch
                    stock_queue.append([-qty_to_pop, batch[1]])
                    break
            else:
                # qty found in current batch
                # consume it and exit
                batch[0] = batch[0] - qty_to_pop
                qty_to_pop = 0
    stock_value = sum((flt(batch[0]) * flt(batch[1]) for batch in stock_queue))
    stock_qty = sum((flt(batch[0]) for batch in stock_queue))
    valuation_rate = (stock_value / flt(stock_qty)) if stock_qty else 0
    return abs(valuation_rate)
def _raise_exceptions(args, verbose=1):
    """Raise NegativeStockError describing the worst recorded shortfall.

    Reads the module-level ``_exceptions`` list populated by
    validate_negative_stock; ``{6}`` in the message is the deficiency.
    """
    deficiency = min(e["diff"] for e in _exceptions)
    msg = _("Negative Stock Error ({6}) for Item {0} in Warehouse {1} on {2} {3} in {4} {5}").format(args["item_code"],
        args.get("warehouse"), _exceptions[0]["posting_date"], _exceptions[0]["posting_time"],
        _(_exceptions[0]["voucher_type"]), _exceptions[0]["voucher_no"], deficiency)
    if verbose:
        frappe.throw(msg, NegativeStockError)
    else:
        # Python 2 raise syntax, consistent with the rest of this module
        raise NegativeStockError, msg
def get_previous_sle(args, for_update=False):
    """Return the last Stock Ledger Entry on or before the posting datetime
    in ``args`` (empty dict if none exists).

    Used by transactions (stock entry, reconciliation, ...) to get the
    actual quantity before the transaction.

    args = {
        "item_code": "ABC",
        "warehouse": "XYZ",
        "posting_date": "2012-12-12",
        "posting_time": "12:00",
        "sle": "name of reference Stock Ledger Entry"
    }
    """
    if not args.get("sle"):
        args["sle"] = ""
    filters = [
        "name != %(sle)s",
        "timestamp(posting_date, posting_time) <= timestamp(%(posting_date)s, %(posting_time)s)",
    ]
    entries = get_stock_ledger_entries(args, filters, "desc", "limit 1",
        for_update=for_update)
    return (entries and entries[0]) or {}
def get_valuation_rate(item_code, warehouse, allow_zero_rate=False):
    """Best-effort valuation rate for an item.

    Lookup order: last positive rate for this item+warehouse, then for the
    item in any warehouse, then the item's buying price list rate. Raises
    when nothing is found, zero is disallowed and perpetual inventory
    accounting is enabled.
    """
    last_valuation_rate = frappe.db.sql("""select valuation_rate
        from `tabStock Ledger Entry`
        where item_code = %s and warehouse = %s
        and ifnull(valuation_rate, 0) > 0
        order by posting_date desc, posting_time desc, name desc limit 1""", (item_code, warehouse))
    if not last_valuation_rate:
        # fall back to any warehouse for this item
        last_valuation_rate = frappe.db.sql("""select valuation_rate
            from `tabStock Ledger Entry`
            where item_code = %s and ifnull(valuation_rate, 0) > 0
            order by posting_date desc, posting_time desc, name desc limit 1""", item_code)
    valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0
    if not valuation_rate:
        valuation_rate = frappe.db.get_value("Item Price", {"item_code": item_code, "buying": 1}, "price_list_rate")
    if not allow_zero_rate and not valuation_rate and cint(frappe.db.get_value("Accounts Settings", None, "auto_accounting_for_stock")):
        frappe.throw(_("Purchase rate for item: {0} not found, which is required to book accounting entry (expense). Please mention item price against a buying price list.").format(item_code))
    return valuation_rate
| agpl-3.0 |
zenoss/ZenPacks.chudler.GoogleAppEngine | ZenPacks/chudler/GoogleAppEngine/mechanize/_pullparser.py | 15 | 14326 | """A simple "pull API" for HTML parsing, after Perl's HTML::TokeParser.
Examples
This program extracts all links from a document. It will print one
line for each link, containing the URL and the textual description
between the <A>...</A> tags:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
for token in p.tags("a"):
if token.type == "endtag": continue
url = dict(token.attrs).get("href", "-")
text = p.get_compressed_text(endat=("endtag", "a"))
print "%s\t%s" % (url, text)
This program extracts the <TITLE> from the document:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
if p.get_tag("title"):
title = p.get_compressed_text()
print "Title: %s" % title
Copyright 2003-2006 John J. Lee <jjl@pobox.com>
Copyright 1998-2001 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses.
"""
import re, htmlentitydefs
import sgmllib, HTMLParser
from xml.sax import saxutils
from _html import unescape, unescape_charref
class NoMoreTokensError(Exception):
    """Raised when the pull parser's token stream is exhausted."""
class Token:
    """Represents an HTML tag, declaration, processing instruction etc.

    Behaves as both a tuple-like object (ie. iterable) and has attributes
    .type, .data and .attrs.

    >>> t = Token("starttag", "a", [("href", "http://www.python.org/")])
    >>> t == ("starttag", "a", [("href", "http://www.python.org/")])
    True
    >>> (t.type, t.data) == ("starttag", "a")
    True
    >>> t.attrs == [("href", "http://www.python.org/")]
    True

    Public attributes

    type: one of "starttag", "endtag", "startendtag", "charref", "entityref",
     "data", "comment", "decl", "pi", after the corresponding methods of
     HTMLParser.HTMLParser
    data: For a tag, the tag name; otherwise, the relevant data carried by the
     tag, as a string
    attrs: list of (name, value) pairs representing HTML attributes
     (or None if token does not represent an opening tag)

    """
    def __init__(self, type, data, attrs=None):
        self.type = type
        self.data = data
        self.attrs = attrs
    def __iter__(self):
        # tuple-like behaviour: iterates (type, data, attrs)
        return iter((self.type, self.data, self.attrs))
    def __eq__(self, other):
        # compares against any 3-sequence, e.g. a plain tuple
        type, data, attrs = other
        if (self.type == type and
            self.data == data and
            self.attrs == attrs):
            return True
        else:
            return False
    def __ne__(self, other): return not self.__eq__(other)
    def __repr__(self):
        args = ", ".join(map(repr, [self.type, self.data, self.attrs]))
        return self.__class__.__name__+"(%s)" % args
    def __str__(self):
        """
        >>> print Token("starttag", "br")
        <br>
        >>> print Token("starttag", "a",
        ...     [("href", "http://www.python.org/"), ("alt", '"foo"')])
        <a href="http://www.python.org/" alt='"foo"'>
        >>> print Token("startendtag", "br")
        <br />
        >>> print Token("startendtag", "br", [("spam", "eggs")])
        <br spam="eggs" />
        >>> print Token("endtag", "p")
        </p>
        >>> print Token("charref", "38")
        &#38;
        >>> print Token("entityref", "amp")
        &amp;
        >>> print Token("data", "foo\\nbar")
        foo
        bar
        >>> print Token("comment", "Life is a bowl\\nof cherries.")
        <!--Life is a bowl
        of cherries.-->
        >>> print Token("decl", "decl")
        <!decl>
        >>> print Token("pi", "pi")
        <?pi>
        """
        if self.attrs is not None:
            # quoteattr handles embedded quotes in attribute values
            attrs = "".join([" %s=%s" % (k, saxutils.quoteattr(v)) for
                             k, v in self.attrs])
        else:
            attrs = ""
        if self.type == "starttag":
            return "<%s%s>" % (self.data, attrs)
        elif self.type == "startendtag":
            return "<%s%s />" % (self.data, attrs)
        elif self.type == "endtag":
            return "</%s>" % self.data
        elif self.type == "charref":
            return "&#%s;" % self.data
        elif self.type == "entityref":
            return "&%s;" % self.data
        elif self.type == "data":
            return self.data
        elif self.type == "comment":
            return "<!--%s-->" % self.data
        elif self.type == "decl":
            return "<!%s>" % self.data
        elif self.type == "pi":
            return "<?%s>" % self.data
        # unreachable for the documented token types above
        assert False
def iter_until_exception(fn, exception, *args, **kwds):
    """Yield fn(*args, **kwds) repeatedly until it raises `exception`.

    The given exception type terminates iteration cleanly; any other
    exception propagates to the caller.
    """
    while 1:
        try:
            yield fn(*args, **kwds)
        except exception:
            # PEP 479: `raise StopIteration` inside a generator becomes
            # RuntimeError on Python 3.7+; a plain return ends the generator
            # identically on all versions.
            return
class _AbstractParser:
    # Shared pull-parser machinery.  Concrete subclasses mix in a parser
    # class (HTMLParser / sgmllib) whose .feed() invokes the handle_* /
    # unknown_* callbacks below, which in turn fill self._tokenstack.
    chunk = 1024  # bytes read from the file object per refill
    compress_re = re.compile(r"\s+")
    def __init__(self, fh, textify={"img": "alt", "applet": "alt"},
                 encoding="ascii", entitydefs=None):
        """
        fh: file-like object (only a .read() method is required) from which to
         read HTML to be parsed
        textify: mapping used by .get_text() and .get_compressed_text() methods
         to represent opening tags as text
        encoding: encoding used to encode numeric character references by
         .get_text() and .get_compressed_text() ("ascii" by default)
        entitydefs: mapping like {"amp": "&", ...} containing HTML entity
         definitions (a sensible default is used).  This is used to unescape
         entities in .get_text() (and .get_compressed_text()) and attribute
         values.  If the encoding can not represent the character, the entity
         reference is left unescaped.  Note that entity references (both
         numeric and non-numeric) are unescaped in attribute values and the
         return value of .get_text(), but not in data outside of tags.
         Instead, entity references outside of tags are represented as
         tokens.  This is a bit odd, it's true :-/

        If the element name of an opening tag matches a key in the textify
        mapping then that tag is converted to text.  The corresponding value
        is used to specify which tag attribute to obtain the text from.
        textify maps from element names to either:

          - an HTML attribute name, in which case the HTML attribute value is
            used as its text value along with the element name in square
            brackets (eg."alt text goes here[IMG]", or, if the alt attribute
            were missing, just "[IMG]")
          - a callable object (eg. a function) which takes a Token and returns
            the string to be used as its text value

        If textify has no key for an element name, nothing is substituted for
        the opening tag.

        Public attributes:

        encoding and textify: see above

        """
        self._fh = fh
        self._tokenstack = []  # FIFO
        self.textify = textify
        self.encoding = encoding
        if entitydefs is None:
            entitydefs = htmlentitydefs.name2codepoint
        self._entitydefs = entitydefs
    def __iter__(self): return self
    def tags(self, *names):
        # iterate opening/closing tags (optionally restricted to `names`)
        return iter_until_exception(self.get_tag, NoMoreTokensError, *names)
    def tokens(self, *tokentypes):
        # iterate all tokens (optionally restricted to `tokentypes`)
        return iter_until_exception(self.get_token, NoMoreTokensError,
                                    *tokentypes)
    def next(self):
        # Python 2 iterator protocol; NOTE(review): Python 3 would need
        # __next__ as well.
        try:
            return self.get_token()
        except NoMoreTokensError:
            raise StopIteration()
    def get_token(self, *tokentypes):
        """Pop the next Token object from the stack of parsed tokens.

        If arguments are given, they are taken to be token types in which the
        caller is interested: tokens representing other elements will be
        skipped.  Element names must be given in lower case.

        Raises NoMoreTokensError.

        """
        while 1:
            while self._tokenstack:
                token = self._tokenstack.pop(0)
                if tokentypes:
                    if token.type in tokentypes:
                        return token
                else:
                    return token
            # stack empty: read another chunk and let the mixed-in parser's
            # .feed() refill the stack via the handle_* callbacks
            data = self._fh.read(self.chunk)
            if not data:
                raise NoMoreTokensError()
            self.feed(data)
    def unget_token(self, token):
        """Push a Token back onto the stack."""
        self._tokenstack.insert(0, token)
    def get_tag(self, *names):
        """Return the next Token that represents an opening or closing tag.

        If arguments are given, they are taken to be element names in which
        the caller is interested: tags representing other elements will be
        skipped.  Element names must be given in lower case.

        Raises NoMoreTokensError.

        """
        while 1:
            tok = self.get_token()
            if tok.type not in ["starttag", "endtag", "startendtag"]:
                continue
            if names:
                if tok.data in names:
                    return tok
            else:
                return tok
    def get_text(self, endat=None):
        """Get some text.

        endat: stop reading text at this tag (the tag is included in the
         returned text); endtag is a tuple (type, name) where type is
         "starttag", "endtag" or "startendtag", and name is the element name
         of the tag (element names must be given in lower case)

        If endat is not given, .get_text() will stop at the next opening or
        closing tag, or when there are no more tokens (no exception is
        raised).  Note that .get_text() includes the text representation (if
        any) of the opening tag, but pushes the opening tag back onto the
        stack.  As a result, if you want to call .get_text() again, you need
        to call .get_tag() first (unless you want an empty string returned
        when you next call .get_text()).

        Entity references are translated using the value of the entitydefs
        constructor argument (a mapping from names to characters like that
        provided by the standard module htmlentitydefs).  Named entity
        references that are not in this mapping are left unchanged.

        The textify attribute is used to translate opening tags into text:
        see the class docstring.

        """
        text = []
        tok = None
        while 1:
            try:
                tok = self.get_token()
            except NoMoreTokensError:
                # unget last token (not the one we just failed to get)
                if tok: self.unget_token(tok)
                break
            if tok.type == "data":
                text.append(tok.data)
            elif tok.type == "entityref":
                t = unescape("&%s;"%tok.data, self._entitydefs, self.encoding)
                text.append(t)
            elif tok.type == "charref":
                t = unescape_charref(tok.data, self.encoding)
                text.append(t)
            elif tok.type in ["starttag", "endtag", "startendtag"]:
                tag_name = tok.data
                if tok.type in ["starttag", "startendtag"]:
                    alt = self.textify.get(tag_name)
                    if alt is not None:
                        if callable(alt):
                            text.append(alt(tok))
                        elif tok.attrs is not None:
                            for k, v in tok.attrs:
                                if k == alt:
                                    text.append(v)
                            text.append("[%s]" % tag_name.upper())
                # stop on any tag unless endat restricts it to one specific
                # (type, name); the tag itself is pushed back for the caller
                if endat is None or endat == (tok.type, tag_name):
                    self.unget_token(tok)
                    break
        return "".join(text)
    def get_compressed_text(self, *args, **kwds):
        """
        As .get_text(), but collapses each group of contiguous whitespace to
        a single space character, and removes all initial and trailing
        whitespace.

        """
        text = self.get_text(*args, **kwds)
        text = text.strip()
        return self.compress_re.sub(" ", text)
    # --- parser callbacks: each queues one Token per parse event ---
    def handle_startendtag(self, tag, attrs):
        self._tokenstack.append(Token("startendtag", tag, attrs))
    def handle_starttag(self, tag, attrs):
        self._tokenstack.append(Token("starttag", tag, attrs))
    def handle_endtag(self, tag):
        self._tokenstack.append(Token("endtag", tag))
    def handle_charref(self, name):
        self._tokenstack.append(Token("charref", name))
    def handle_entityref(self, name):
        self._tokenstack.append(Token("entityref", name))
    def handle_data(self, data):
        self._tokenstack.append(Token("data", data))
    def handle_comment(self, data):
        self._tokenstack.append(Token("comment", data))
    def handle_decl(self, decl):
        self._tokenstack.append(Token("decl", decl))
    def unknown_decl(self, data):
        # XXX should this call self.error instead?
        #self.error("unknown declaration: " + `data`)
        self._tokenstack.append(Token("decl", data))
    def handle_pi(self, data):
        self._tokenstack.append(Token("pi", data))
    def unescape_attr(self, name):
        # unescape entity/char references using the configured entitydefs
        return unescape(name, self._entitydefs, self.encoding)
    def unescape_attrs(self, attrs):
        escaped_attrs = []
        for key, val in attrs:
            escaped_attrs.append((key, self.unescape_attr(val)))
        return escaped_attrs
class PullParser(_AbstractParser, HTMLParser.HTMLParser):
    """Pull parser built on the standard HTMLParser.HTMLParser."""
    def __init__(self, *args, **kwds):
        # initialize both bases explicitly (no cooperative super in py2)
        HTMLParser.HTMLParser.__init__(self)
        _AbstractParser.__init__(self, *args, **kwds)
    def unescape(self, name):
        # Use the entitydefs passed into constructor, not
        # HTMLParser.HTMLParser's entitydefs.
        return self.unescape_attr(name)
class TolerantPullParser(_AbstractParser, sgmllib.SGMLParser):
    """Pull parser built on sgmllib.SGMLParser."""
    def __init__(self, *args, **kwds):
        # initialize both bases explicitly (no cooperative super in py2)
        sgmllib.SGMLParser.__init__(self)
        _AbstractParser.__init__(self, *args, **kwds)
    def unknown_starttag(self, tag, attrs):
        # SGMLParser does not unescape attribute values itself
        attrs = self.unescape_attrs(attrs)
        self._tokenstack.append(Token("starttag", tag, attrs))
    def unknown_endtag(self, tag):
        self._tokenstack.append(Token("endtag", tag))
def _test():
    """Run this module's doctests and return doctest.testmod's result."""
    import doctest
    import _pullparser
    return doctest.testmod(_pullparser)
# Running the module directly executes its doctest suite.
if __name__ == "__main__":
    _test()
| gpl-2.0 |
Timtam/cards-against-humanity | editor/card_panel.py | 1 | 5429 | from const import *
from shared.card import CARD_BLACK, CARD_WHITE, Card
class CardPanel(wx.Panel):
    """Panel showing a single card (black or white) in the editor grid.

    Tracks hover/click state across the panel, its subpanel and the inner
    text control, and notifies the frame's right-hand window when the card
    is activated.
    """
    def __init__(self, parent, card_id=-1, size=(ELEMENT_SIZE, ELEMENT_SIZE),
                 text="",
                 card_type=CARD_WHITE):
        wx.Panel.__init__(self, parent=parent, size=size,
                          name=("card %d" %card_id), style=wx.SIMPLE_BORDER)
        frame = self.GetTopLevelParent()
        self.SetName(frame.translator.translate("Card {number}").format(number = card_id))
        self.card = Card(id=card_id, text=text, type=card_type)
        # subpanel for more free space between panel-border and text
        self.subpanel = wx.Panel(self, name=self.GetName(), style=wx.NO_BORDER)
        self.text = CardText(parent=self.subpanel, text=self.card.getCardText())
        box = wx.BoxSizer(wx.VERTICAL)
        box.Add(self.text, 1, wx.ALL | wx.EXPAND, 10)
        self.subpanel.SetSizer(box)
        box2 = wx.BoxSizer(wx.VERTICAL)
        box2.Add(self.subpanel, 1, wx.ALL | wx.EXPAND, BORDER_CARD)
        self.SetSizer(box2)
        self.setColors()
        # click
        self.Bind(wx.EVT_LEFT_UP, self.onClick)
        self.subpanel.Bind(wx.EVT_LEFT_UP, self.onClick)
        self.text.Bind(wx.EVT_LEFT_UP, self.onClick)
        # hover - right, i need them all, because the text lays in the panel;
        # when you move mouse into the text, you will first enter the panel,
        # then enter the subpanel and in the same time leave the panel again,
        # then enter the text and leave the subpanel;
        # the same thing happens on leaving: in text -> enter subpanel & leave text
        # -> enter panel & leave subpanel
        # i solved this using the flags below, kind of strange that it needs so
        # much lines...
        self.Bind(wx.EVT_ENTER_WINDOW, self.onEnteringPanel)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.onLeavingPanel)
        self.subpanel.Bind(wx.EVT_ENTER_WINDOW, self.onEnteringSubPanel)
        self.subpanel.Bind(wx.EVT_LEAVE_WINDOW, self.onLeavingSubPanel)
        self.text.Bind(wx.EVT_ENTER_WINDOW, self.onEnteringText)
        self.text.Bind(wx.EVT_LEAVE_WINDOW, self.onLeavingText)
        # for navigation per keys
        self.subpanel.Bind(wx.EVT_CHILD_FOCUS, self.onEnteringPanel)
        self.subpanel.Bind(wx.EVT_KILL_FOCUS, self.onLeavingPanel)
        # some necessary flags
        self.entered_panel = self.entered_subpanel = self.entered_text = \
            self.clicked = False
        self.subpanel.Bind(wx.EVT_KEY_UP, self.onKeyPress)
    def onClick(self, event):
        """Activate this card and show it in the frame's editing pane."""
        self.setActive()
        frame = self.GetTopLevelParent()
        frame.right_window.setCard(self.card)
        frame.right_window.current_card_text.SetFocus()
    def setActive(self):
        """Mark this card active, deactivating any previously active one."""
        self.clicked = True
        # if there already is an other "active" card, we need to "deactivate" (
        # change colors to normal)
        parent = self.GetParent()
        if parent.active_card is not None and parent.active_card != self.card:
            active_card = parent.getCard(parent.active_card)
            # if active_card not found, the previous one got deleted
            if active_card is not None:
                active_card.clicked = False
                active_card.setColors()
                active_card.Refresh()
        # set a color for clicked card ("active")
        self.SetBackgroundColour(COLOR_ACTIVE_CARD)
        self.Refresh()
        parent.active_card = self.card
    def onEnteringPanel(self, event):
        if not self.clicked:
            self.SetBackgroundColour(COLOR_HOVER_CARD)
            self.Refresh()
        self.entered_panel = True
    def onLeavingPanel(self, event):
        if not self.clicked:
            if not self.entered_subpanel:
                self.setColors()
                self.Refresh()
            self.entered_subpanel = False  # fixing bug sometimes hover stays,
                                           # when mouse moved fast
        self.entered_panel = False
    def onEnteringSubPanel(self, event):
        if not self.clicked:
            self.SetBackgroundColour(COLOR_HOVER_CARD)
            self.Refresh()
        self.entered_subpanel = True
    def onLeavingSubPanel(self, event):
        if not self.clicked:
            if not self.entered_text and not self.entered_panel:
                self.setColors()
                self.Refresh()
            self.entered_text = False
            self.entered_panel = False
        self.entered_subpanel = False
    def onEnteringText(self, event):
        if not self.clicked:
            self.SetBackgroundColour(COLOR_HOVER_CARD)
            self.Refresh()
        self.entered_text = True
    def onLeavingText(self, event):
        if not self.clicked:
            if not self.entered_subpanel:
                self.setColors()
                self.Refresh()
            self.entered_subpanel = False
        self.entered_text = False
    def onKeyPress(self, e):
        # Enter behaves like a mouse click; everything else propagates
        key_code = e.GetKeyCode()
        if key_code == wx.WXK_RETURN:
            self.onClick(e)
            return
        e.Skip()
    def setColors(self):
        """Apply the card-type color scheme (black card / white card)."""
        if self.card.type is CARD_BLACK:
            self.SetBackgroundColour("black")
            self.subpanel.SetBackgroundColour("black")
            self.text.SetBackgroundColour("black")
            self.text.SetForegroundColour("white")
        else:
            self.SetBackgroundColour("white")
            self.subpanel.SetBackgroundColour("white")
            self.text.SetBackgroundColour("white")
            self.text.SetForegroundColour("black")
class CardText(wx.StaticText):
    """Static text label displayed inside a CardPanel."""
    def __init__(self, parent, text):
        wx.StaticText.__init__(self, parent=parent, label=text)
        # fix flickering
        self.Bind(wx.EVT_ERASE_BACKGROUND, onEraseBackGround)
def onEraseBackGround(e):
    """Deliberately ignore erase-background events to prevent flicker."""
| mit |
kepstin/picard | picard/ui/ui_edittagdialog.py | 2 | 5033 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/edittagdialog.ui'
#
# Created: Fri Jul 13 15:19:05 2012
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString is unavailable (e.g. PyQt API v2): fall back to identity
    _fromUtf8 = lambda s: s
class Ui_EditTagDialog(object):
    """UI definition generated by pyuic4 from ui/edittagdialog.ui.

    Per the file header, manual changes will be lost on regeneration —
    edit the .ui file instead.
    """
    def setupUi(self, EditTagDialog):
        """Build the dialog's widgets, layouts and signal connections."""
        EditTagDialog.setObjectName(_fromUtf8("EditTagDialog"))
        EditTagDialog.setWindowModality(QtCore.Qt.ApplicationModal)
        EditTagDialog.resize(400, 250)
        EditTagDialog.setFocusPolicy(QtCore.Qt.StrongFocus)
        EditTagDialog.setModal(True)
        self.verticalLayout_2 = QtGui.QVBoxLayout(EditTagDialog)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.tag_names = QtGui.QComboBox(EditTagDialog)
        self.tag_names.setEditable(True)
        self.tag_names.setObjectName(_fromUtf8("tag_names"))
        self.verticalLayout_2.addWidget(self.tag_names)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.value_list = QtGui.QListWidget(EditTagDialog)
        self.value_list.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.value_list.setTabKeyNavigation(False)
        self.value_list.setProperty(_fromUtf8("showDropIndicator"), False)
        self.value_list.setObjectName(_fromUtf8("value_list"))
        self.horizontalLayout.addWidget(self.value_list)
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.edit_value = QtGui.QPushButton(EditTagDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(100)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.edit_value.sizePolicy().hasHeightForWidth())
        self.edit_value.setSizePolicy(sizePolicy)
        self.edit_value.setMinimumSize(QtCore.QSize(100, 0))
        self.edit_value.setAutoDefault(False)
        self.edit_value.setObjectName(_fromUtf8("edit_value"))
        self.verticalLayout.addWidget(self.edit_value)
        self.add_value = QtGui.QPushButton(EditTagDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(100)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.add_value.sizePolicy().hasHeightForWidth())
        self.add_value.setSizePolicy(sizePolicy)
        self.add_value.setMinimumSize(QtCore.QSize(100, 0))
        self.add_value.setAutoDefault(False)
        self.add_value.setObjectName(_fromUtf8("add_value"))
        self.verticalLayout.addWidget(self.add_value)
        self.remove_value = QtGui.QPushButton(EditTagDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(120)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.remove_value.sizePolicy().hasHeightForWidth())
        self.remove_value.setSizePolicy(sizePolicy)
        self.remove_value.setMinimumSize(QtCore.QSize(120, 0))
        self.remove_value.setAutoDefault(False)
        self.remove_value.setObjectName(_fromUtf8("remove_value"))
        self.verticalLayout.addWidget(self.remove_value)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.horizontalLayout.addLayout(self.verticalLayout)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.buttonbox = QtGui.QDialogButtonBox(EditTagDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(150)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.buttonbox.sizePolicy().hasHeightForWidth())
        self.buttonbox.setSizePolicy(sizePolicy)
        self.buttonbox.setMinimumSize(QtCore.QSize(150, 0))
        self.buttonbox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonbox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Save)
        self.buttonbox.setObjectName(_fromUtf8("buttonbox"))
        self.verticalLayout_2.addWidget(self.buttonbox)
        self.retranslateUi(EditTagDialog)
        QtCore.QObject.connect(self.buttonbox, QtCore.SIGNAL(_fromUtf8("accepted()")), EditTagDialog.accept)
        QtCore.QObject.connect(self.buttonbox, QtCore.SIGNAL(_fromUtf8("rejected()")), EditTagDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(EditTagDialog)
    def retranslateUi(self, EditTagDialog):
        # NOTE(review): assumes a gettext-style `_` is installed at runtime
        # (picard installs one globally) — confirm before reuse elsewhere.
        EditTagDialog.setWindowTitle(_("Edit Tag"))
        self.edit_value.setText(_("Edit value"))
        self.add_value.setText(_("Add value"))
        self.remove_value.setText(_("Remove value"))
| gpl-2.0 |
home-assistant/home-assistant | tests/components/alexa/test_intent.py | 14 | 17698 | """The tests for the Alexa component."""
# pylint: disable=protected-access
import json
import pytest
from homeassistant.components import alexa
from homeassistant.components.alexa import intent
from homeassistant.const import CONTENT_TYPE_JSON
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
# Canned Alexa identifiers reused by the request payloads in the tests below.
SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000"
APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe"
REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000"
AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC"
BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST"

# pylint: disable=invalid-name
# Service calls recorded by the mock_service handler in the alexa_client
# fixture.
calls = []

NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3"
@pytest.fixture
def alexa_client(loop, hass, hass_client):
    """Initialize a Home Assistant server for testing this module."""

    @callback
    def mock_service(call):
        # record every `test.alexa` service call for later assertions
        calls.append(call)

    hass.services.async_register("test", "alexa", mock_service)
    assert loop.run_until_complete(
        async_setup_component(
            hass,
            alexa.DOMAIN,
            {
                # Key is here to verify we allow other keys in config too
                "homeassistant": {},
                "alexa": {},
            },
        )
    )
    # intent_script provides the canned responses asserted by the tests
    assert loop.run_until_complete(
        async_setup_component(
            hass,
            "intent_script",
            {
                "intent_script": {
                    "WhereAreWeIntent": {
                        "speech": {
                            "type": "plain",
                            "text": """
                        {%- if is_state("device_tracker.paulus", "home")
                               and is_state("device_tracker.anne_therese",
                                            "home") -%}
                            You are both home, you silly
                        {%- else -%}
                            Anne Therese is at {{
                                states("device_tracker.anne_therese")
                            }} and Paulus is at {{
                                states("device_tracker.paulus")
                            }}
                        {% endif %}
                    """,
                        }
                    },
                    "GetZodiacHoroscopeIntent": {
                        "speech": {
                            "type": "plain",
                            "text": "You told us your sign is {{ ZodiacSign }}.",
                        }
                    },
                    "AMAZON.PlaybackAction<object@MusicCreativeWork>": {
                        "speech": {
                            "type": "plain",
                            "text": "Playing {{ object_byArtist_name }}.",
                        }
                    },
                    "CallServiceIntent": {
                        "speech": {
                            "type": "plain",
                            "text": "Service called for {{ ZodiacSign }}",
                        },
                        "card": {
                            "type": "simple",
                            "title": "Card title for {{ ZodiacSign }}",
                            "content": "Card content: {{ ZodiacSign }}",
                        },
                        "action": {
                            "service": "test.alexa",
                            "data_template": {"hello": "{{ ZodiacSign }}"},
                            "entity_id": "switch.test",
                        },
                    },
                    # intent keyed by application id handles LaunchRequest
                    APPLICATION_ID: {
                        "speech": {
                            "type": "plain",
                            "text": "LaunchRequest has been received.",
                        }
                    },
                }
            },
        )
    )
    return loop.run_until_complete(hass_client())
def _intent_req(client, data=None):
    """POST an Alexa intent payload to the intents endpoint."""
    payload = json.dumps(data or {})
    headers = {"content-type": CONTENT_TYPE_JSON}
    return client.post(intent.INTENTS_API_ENDPOINT, data=payload, headers=headers)
async def test_intent_launch_request(alexa_client):
    """Test the launch of a request."""
    payload = {
        "version": "1.0",
        "session": {
            "new": True,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {},
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "LaunchRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
        },
    }
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "LaunchRequest has been received."
async def test_intent_launch_request_not_configured(alexa_client):
    """Test the launch of a request."""
    data = {
        "version": "1.0",
        "session": {
            "new": True,
            "sessionId": SESSION_ID,
            # application id deliberately differs from the one registered in
            # the alexa_client fixture, so no intent matches
            "application": {
                "applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000"
            },
            "attributes": {},
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "LaunchRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
        },
    }
    req = await _intent_req(alexa_client, data)
    assert req.status == 200
    data = await req.json()
    text = data.get("response", {}).get("outputSpeech", {}).get("text")
    assert text == "This intent is not yet configured within Home Assistant."
async def test_intent_request_with_slots(alexa_client):
    """Test a request with slots."""
    data = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False,
                }
            },
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "GetZodiacHoroscopeIntent",
                # plain slot value, no entity resolution block
                "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}},
            },
        },
    }
    req = await _intent_req(alexa_client, data)
    assert req.status == 200
    data = await req.json()
    text = data.get("response", {}).get("outputSpeech", {}).get("text")
    assert text == "You told us your sign is virgo."
async def test_intent_request_with_slots_and_synonym_resolution(alexa_client):
    """Test a request whose slot value resolves to a single synonym."""
    # One successful custom-authority resolution, one failed built-in one:
    # the handler should pick the single matched value ("Virgo").
    resolutions = {
        "resolutionsPerAuthority": [
            {
                "authority": AUTHORITY_ID,
                "status": {"code": "ER_SUCCESS_MATCH"},
                "values": [{"value": {"name": "Virgo"}}],
            },
            {
                "authority": BUILTIN_AUTH_ID,
                "status": {"code": "ER_SUCCESS_NO_MATCH"},
                "values": [{"value": {"name": "Test"}}],
            },
        ]
    }
    payload = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False,
                }
            },
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "GetZodiacHoroscopeIntent",
                "slots": {
                    "ZodiacSign": {
                        "name": "ZodiacSign",
                        "value": "V zodiac",
                        "resolutions": resolutions,
                    }
                },
            },
        },
    }
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "You told us your sign is Virgo."
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client):
    """Test that ambiguous multi-authority matches fall back to the raw value."""
    # Both authorities report a successful match; with no unique winner the
    # handler should keep the literal spoken value ("V zodiac").
    resolutions = {
        "resolutionsPerAuthority": [
            {
                "authority": AUTHORITY_ID,
                "status": {"code": "ER_SUCCESS_MATCH"},
                "values": [{"value": {"name": "Virgo"}}],
            },
            {
                "authority": BUILTIN_AUTH_ID,
                "status": {"code": "ER_SUCCESS_MATCH"},
                "values": [{"value": {"name": "Test"}}],
            },
        ]
    }
    payload = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False,
                }
            },
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "GetZodiacHoroscopeIntent",
                "slots": {
                    "ZodiacSign": {
                        "name": "ZodiacSign",
                        "value": "V zodiac",
                        "resolutions": resolutions,
                    }
                },
            },
        },
    }
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "You told us your sign is V zodiac."
async def test_intent_request_with_slots_but_no_value(alexa_client):
    """Test that a slot without a value renders as an empty substitution."""
    session = {
        "new": False,
        "sessionId": SESSION_ID,
        "application": {"applicationId": APPLICATION_ID},
        "attributes": {
            "supportedHoroscopePeriods": {
                "daily": True,
                "weekly": False,
                "monthly": False,
            }
        },
        "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
    }
    request = {
        "type": "IntentRequest",
        "requestId": REQUEST_ID,
        "timestamp": "2015-05-13T12:34:56Z",
        "intent": {
            "name": "GetZodiacHoroscopeIntent",
            # Slot is present but carries no "value" key.
            "slots": {"ZodiacSign": {"name": "ZodiacSign"}},
        },
    }
    resp = await _intent_req(
        alexa_client, {"version": "1.0", "session": session, "request": request}
    )
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "You told us your sign is ."
async def test_intent_request_without_slots(hass, alexa_client):
    """Test a request without slots."""
    payload = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False,
                }
            },
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {"name": "WhereAreWeIntent"},
        },
    }
    # First pass: no tracker state set yet, both people are "unknown".
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "Anne Therese is at unknown and Paulus is at unknown"
    # Second pass: both trackers home, the template takes the other branch.
    hass.states.async_set("device_tracker.paulus", "home")
    hass.states.async_set("device_tracker.anne_therese", "home")
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "You are both home, you silly"
async def test_intent_request_calling_service(alexa_client):
    """Test that an intent mapped to a service actually calls the service."""
    payload = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {},
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "CallServiceIntent",
                "slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}},
            },
        },
    }
    calls_before = len(calls)
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    # Exactly one new service call, with the slot value templated in.
    assert len(calls) == calls_before + 1
    call = calls[-1]
    assert call.domain == "test"
    assert call.service == "alexa"
    assert call.data.get("entity_id") == ["switch.test"]
    assert call.data.get("hello") == "virgo"
    body = await resp.json()
    assert body["response"]["card"]["title"] == "Card title for virgo"
    assert body["response"]["card"]["content"] == "Card content: virgo"
    assert body["response"]["outputSpeech"]["type"] == "PlainText"
    assert body["response"]["outputSpeech"]["text"] == "Service called for virgo"
async def test_intent_session_ended_request(alexa_client):
    """Test that a SessionEndedRequest is accepted with an empty body."""
    payload = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False,
                }
            },
            "user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
        },
        "request": {
            "type": "SessionEndedRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "reason": "USER_INITIATED",
        },
    }
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    assert await resp.text() == ""
async def test_intent_from_built_in_intent_library(alexa_client):
    """Test intents from the Built-in Intent Library."""
    # Only the artist and type slots carry values; the rest are empty.
    request = {
        "intent": {
            "name": "AMAZON.PlaybackAction<object@MusicCreativeWork>",
            "slots": {
                "object.byArtist.name": {
                    "name": "object.byArtist.name",
                    "value": "the shins",
                },
                "object.composer.name": {"name": "object.composer.name"},
                "object.contentSource": {"name": "object.contentSource"},
                "object.era": {"name": "object.era"},
                "object.genre": {"name": "object.genre"},
                "object.name": {"name": "object.name"},
                "object.owner.name": {"name": "object.owner.name"},
                "object.select": {"name": "object.select"},
                "object.sort": {"name": "object.sort"},
                "object.type": {"name": "object.type", "value": "music"},
            },
        },
        "timestamp": "2016-12-14T23:23:37Z",
        "type": "IntentRequest",
        "requestId": REQUEST_ID,
    }
    payload = {
        "request": request,
        "session": {
            "sessionId": SESSION_ID,
            "application": {"applicationId": APPLICATION_ID},
        },
    }
    resp = await _intent_req(alexa_client, payload)
    assert resp.status == 200
    body = await resp.json()
    speech = body.get("response", {}).get("outputSpeech", {}).get("text")
    assert speech == "Playing the shins."
| apache-2.0 |
ctxis/canape | CANAPE.Scripting/Lib/distutils/dir_util.py | 106 | 7877 | """distutils.dir_util
Utility functions for manipulating directories and directory trees."""
__revision__ = "$Id$"
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache for by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
# Maps absolute directory path -> 1 for every directory mkpath() has
# created (or pretended to create under dry_run).
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0777, verbose=1, dry_run=0):
    """Create a directory and any missing ancestor directories.
    If the directory already exists (or if 'name' is the empty string, which
    means the current directory, which of course exists), then do nothing.
    Raise DistutilsFileError if unable to create some directory along the way
    (eg. some sub-path exists, but is a file rather than a directory).
    If 'verbose' is true, print a one-line summary of each mkdir to stdout.
    Return the list of directories actually created.
    """
    global _path_created
    # Detect a common bug -- name is None
    if not isinstance(name, basestring):
        raise DistutilsInternalError, \
              "mkpath: 'name' must be a string (got %r)" % (name,)
    # XXX what's the better way to handle verbosity? print as we create
    # each directory in the path (the current behaviour), or only announce
    # the creation of the whole path? (quite easy to do the latter since
    # we're not using a recursive algorithm)
    name = os.path.normpath(name)
    created_dirs = []
    if os.path.isdir(name) or name == '':
        return created_dirs
    # Already created by a previous call (possibly only notionally, under
    # dry_run) -- nothing to do, and no log message to repeat.
    if _path_created.get(os.path.abspath(name)):
        return created_dirs
    (head, tail) = os.path.split(name)
    tails = [tail] # stack of lone dirs to create
    # Walk upward until we hit a directory that already exists.
    while head and tail and not os.path.isdir(head):
        (head, tail) = os.path.split(head)
        tails.insert(0, tail) # push next higher dir onto stack
    # now 'head' contains the deepest directory that already exists
    # (that is, the child of 'head' in 'name' is the highest directory
    # that does *not* exist)
    for d in tails:
        #print "head = %s, d = %s: " % (head, d),
        head = os.path.join(head, d)
        abs_head = os.path.abspath(head)
        if _path_created.get(abs_head):
            continue
        if verbose >= 1:
            log.info("creating %s", head)
        if not dry_run:
            try:
                os.mkdir(head, mode)
            except OSError, exc:
                # EEXIST raced by another creator is fine as long as the
                # path really is a directory now; anything else is fatal.
                if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
                    raise DistutilsFileError(
                          "could not create '%s': %s" % (head, exc.args[-1]))
            created_dirs.append(head)
        # Record even under dry_run so repeated calls stay quiet.
        _path_created[abs_head] = 1
    return created_dirs
def create_tree(base_dir, files, mode=0777, verbose=1, dry_run=0):
    """Create all the empty directories under 'base_dir' needed to put 'files'
    there.
    'base_dir' is just the name of a directory which doesn't necessarily
    exist yet; 'files' is a list of filenames to be interpreted relative to
    'base_dir'. 'base_dir' + the directory portion of every file in 'files'
    will be created if it doesn't already exist. 'mode', 'verbose' and
    'dry_run' flags are as for 'mkpath()'.
    """
    # First get the list of directories to create
    # (a dict is used as a de-duplicating set here -- pre-set-type idiom)
    need_dir = {}
    for file in files:
        need_dir[os.path.join(base_dir, os.path.dirname(file))] = 1
    need_dirs = need_dir.keys()
    # Sort so parents are created before their children.
    need_dirs.sort()
    # Now create them
    for dir in need_dirs:
        mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
              preserve_symlinks=0, update=0, verbose=1, dry_run=0):
    """Copy an entire directory tree 'src' to a new location 'dst'.
    Both 'src' and 'dst' must be directory names. If 'src' is not a
    directory, raise DistutilsFileError. If 'dst' does not exist, it is
    created with 'mkpath()'. The end result of the copy is that every
    file in 'src' is copied to 'dst', and directories under 'src' are
    recursively copied to 'dst'. Return the list of files that were
    copied or might have been copied, using their output name. The
    return value is unaffected by 'update' or 'dry_run': it is simply
    the list of all files under 'src', with the names changed to be
    under 'dst'.
    'preserve_mode' and 'preserve_times' are the same as for
    'copy_file'; note that they only apply to regular files, not to
    directories. If 'preserve_symlinks' is true, symlinks will be
    copied as symlinks (on platforms that support them!); otherwise
    (the default), the destination of the symlink will be copied.
    'update' and 'verbose' are the same as for 'copy_file'.
    """
    from distutils.file_util import copy_file
    if not dry_run and not os.path.isdir(src):
        raise DistutilsFileError, \
              "cannot copy tree '%s': not a directory" % src
    try:
        names = os.listdir(src)
    # NOTE(review): this tuple unpacking rebinds the name 'errno',
    # shadowing the imported errno module for the rest of this function
    # (harmless here since the module is not used below, but fragile).
    except os.error, (errno, errstr):
        if dry_run:
            # Under dry_run a missing 'src' is tolerated: pretend empty.
            names = []
        else:
            raise DistutilsFileError, \
                  "error listing files in '%s': %s" % (src, errstr)
    if not dry_run:
        mkpath(dst, verbose=verbose)
    outputs = []
    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)
        if preserve_symlinks and os.path.islink(src_name):
            # Re-create the symlink itself instead of copying its target.
            link_dest = os.readlink(src_name)
            if verbose >= 1:
                log.info("linking %s -> %s", dst_name, link_dest)
            if not dry_run:
                os.symlink(link_dest, dst_name)
            outputs.append(dst_name)
        elif os.path.isdir(src_name):
            # Recurse; extend with the flattened file list of the subtree.
            outputs.extend(
                copy_tree(src_name, dst_name, preserve_mode,
                          preserve_times, preserve_symlinks, update,
                          verbose=verbose, dry_run=dry_run))
        else:
            copy_file(src_name, dst_name, preserve_mode,
                      preserve_times, update, verbose=verbose,
                      dry_run=dry_run)
            outputs.append(dst_name)
    return outputs
def _build_cmdtuple(path, cmdtuples):
    """Helper for remove_tree().

    Appends (callable, argument) pairs to 'cmdtuples' in post-order, so
    that every directory's contents are scheduled for removal before the
    directory itself. Symlinks to directories are removed as links, not
    descended into.
    """
    for f in os.listdir(path):
        real_f = os.path.join(path,f)
        if os.path.isdir(real_f) and not os.path.islink(real_f):
            _build_cmdtuple(real_f, cmdtuples)
        else:
            cmdtuples.append((os.remove, real_f))
    cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
    """Recursively remove an entire directory tree.
    Any errors are ignored (apart from being reported to stdout if 'verbose'
    is true).
    """
    from distutils.util import grok_environment_error
    global _path_created
    if verbose >= 1:
        log.info("removing '%s' (and everything under it)", directory)
    if dry_run:
        return
    # Collect (callable, path) pairs first, then execute them; children
    # come before parents (see _build_cmdtuple).
    cmdtuples = []
    _build_cmdtuple(directory, cmdtuples)
    for cmd in cmdtuples:
        try:
            cmd[0](cmd[1])
            # remove dir from cache if it's already there
            abspath = os.path.abspath(cmd[1])
            if abspath in _path_created:
                del _path_created[abspath]
        except (IOError, OSError), exc:
            # Best-effort removal: failures are logged, never raised.
            log.warn(grok_environment_error(
                    exc, "error removing %s: " % directory))
def ensure_relative(path):
    """Take the full path 'path', and make it a relative path.

    This is useful to make 'path' the second argument to os.path.join().
    """
    drive, tail = os.path.splitdrive(path)
    if tail.startswith(os.sep):
        tail = drive + tail[1:]
    return tail
| gpl-3.0 |
FlorianLudwig/odoo | addons/account/wizard/account_automatic_reconcile.py | 340 | 11604 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_automatic_reconcile(osv.osv_memory):
    """Wizard that automatically reconciles move lines per account/partner,
    optionally writing off small residual balances to a write-off account.
    """
    _name = 'account.automatic.reconcile'
    _description = 'Automatic Reconcile'
    _columns = {
        'account_ids': fields.many2many('account.account', 'reconcile_account_rel', 'reconcile_id', 'account_id', 'Accounts to Reconcile', domain = [('reconcile','=',1)],),
        'writeoff_acc_id': fields.many2one('account.account', 'Account'),
        'journal_id': fields.many2one('account.journal', 'Journal'),
        'period_id': fields.many2one('account.period', 'Period'),
        'max_amount': fields.float('Maximum write-off amount'),
        'power': fields.selection([(p, str(p)) for p in range(2, 5)], 'Power', required=True, help='Number of partial amounts that can be combined to find a balance point can be chosen as the power of the automatic reconciliation'),
        'reconciled': fields.integer('Reconciled transactions', readonly=True),
        'unreconciled': fields.integer('Not reconciled transactions', readonly=True),
        'allow_write_off': fields.boolean('Allow write off')
    }
    # The two defaults below read the counters that reconcile() stores in
    # the context, so the re-opened wizard form can display the results.
    def _get_reconciled(self, cr, uid, context=None):
        if context is None:
            context = {}
        return context.get('reconciled', 0)
    def _get_unreconciled(self, cr, uid, context=None):
        if context is None:
            context = {}
        return context.get('unreconciled', 0)
    _defaults = {
        'reconciled': _get_reconciled,
        'unreconciled': _get_unreconciled,
        'power': 2
    }
    #TODO: cleanup and comment this code... For now, it is awfulllll
    # (way too complex, and really slow)...
    def do_reconcile(self, cr, uid, credits, debits, max_amount, power, writeoff_acc_id, period_id, journal_id, context=None):
        """
        for one value of a credit, check all debits, and combination of them
        depending on the power. It starts with a power of one and goes up
        to the max power allowed.

        'credits' and 'debits' are lists of (move_line_id, amount) tuples.
        Returns (number_reconciled, number_left_unreconciled).
        """
        move_line_obj = self.pool.get('account.move.line')
        if context is None:
            context = {}
        # check2: find a subset of 'move_list' (up to 'power' elements)
        # whose amounts sum to 'value' within the max_amount tolerance.
        # The inner recursion temporarily deletes/restores elements in
        # place to enumerate combinations without copying the list.
        def check2(value, move_list, power):
            def check(value, move_list, power):
                for i in range(len(move_list)):
                    move = move_list[i]
                    if power == 1:
                        # 0.00001 epsilon guards against float rounding.
                        if abs(value - move[1]) <= max_amount + 0.00001:
                            return [move[0]]
                    else:
                        del move_list[i]
                        res = check(value - move[1], move_list, power-1)
                        move_list[i:i] = [move]
                        if res:
                            res.append(move[0])
                            return res
                return False
            for p in range(1, power+1):
                res = check(value, move_list, p)
                if res:
                    return res
            return False
        def check4(list1, list2, power):
            """
            for a list of credit and debit and a given power, check if there
            are matching tuples of credit and debits, check all debits, and combination of them
            depending on the power. It starts with a power of one and goes up
            to the max power allowed.
            """
            def check3(value, list1, list2, list1power, power):
                for i in range(len(list1)):
                    move = list1[i]
                    if list1power == 1:
                        res = check2(value + move[1], list2, power - 1)
                        if res:
                            return ([move[0]], res)
                    else:
                        del list1[i]
                        res = check3(value + move[1], list1, list2, list1power-1, power-1)
                        list1[i:i] = [move]
                        if res:
                            x, y = res
                            x.append(move[0])
                            return (x, y)
                return False
            for p in range(1, power):
                res = check3(0, list1, list2, p, power)
                if res:
                    return res
            return False
        def check5(list1, list2, max_power):
            # Try increasing combination sizes (a pair needs power 2).
            for p in range(2, max_power+1):
                res = check4(list1, list2, p)
                if res:
                    return res
            return False
        ok = True
        reconciled = 0
        # Keep extracting matching credit/debit groups until none remain.
        while credits and debits and ok:
            res = check5(credits, debits, power)
            if res:
                move_line_obj.reconcile(cr, uid, res[0] + res[1], 'auto', writeoff_acc_id, period_id, journal_id, context)
                reconciled += len(res[0]) + len(res[1])
                # Drop the just-reconciled lines from the work lists.
                credits = [(id, credit) for (id, credit) in credits if id not in res[0]]
                debits = [(id, debit) for (id, debit) in debits if id not in res[1]]
            else:
                ok = False
        return (reconciled, len(credits)+len(debits))
    def reconcile(self, cr, uid, ids, context=None):
        """Run the automatic reconciliation for the selected accounts and
        re-open the wizard form with the reconciled/unreconciled counters
        passed through the context.
        """
        move_line_obj = self.pool.get('account.move.line')
        obj_model = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        form = self.browse(cr, uid, ids, context=context)[0]
        max_amount = form.max_amount or 0.0
        power = form.power
        allow_write_off = form.allow_write_off
        reconciled = unreconciled = 0
        if not form.account_ids:
            raise osv.except_osv(_('User Error!'), _('You must select accounts to reconcile.'))
        for account_id in form.account_ids:
            params = (account_id.id,)
            # Without write-off only exactly balanced partners qualify;
            # with write-off any partner within the max_amount tolerance.
            if not allow_write_off:
                query = """SELECT partner_id FROM account_move_line WHERE account_id=%s AND reconcile_id IS NULL
                AND state <> 'draft' GROUP BY partner_id
                HAVING ABS(SUM(debit-credit)) = 0.0 AND count(*)>0"""
            else:
                query = """SELECT partner_id FROM account_move_line WHERE account_id=%s AND reconcile_id IS NULL
                AND state <> 'draft' GROUP BY partner_id
                HAVING ABS(SUM(debit-credit)) < %s AND count(*)>0"""
                params += (max_amount,)
            # reconcile automatically all transactions from partners whose balance is 0
            cr.execute(query, params)
            partner_ids = [id for (id,) in cr.fetchall()]
            for partner_id in partner_ids:
                cr.execute(
                    "SELECT id " \
                    "FROM account_move_line " \
                    "WHERE account_id=%s " \
                    "AND partner_id=%s " \
                    "AND state <> 'draft' " \
                    "AND reconcile_id IS NULL",
                    (account_id.id, partner_id))
                line_ids = [id for (id,) in cr.fetchall()]
                if line_ids:
                    reconciled += len(line_ids)
                    if allow_write_off:
                        move_line_obj.reconcile(cr, uid, line_ids, 'auto', form.writeoff_acc_id.id, form.period_id.id, form.journal_id.id, context)
                    else:
                        move_line_obj.reconcile_partial(cr, uid, line_ids, 'manual', context=context)
            # get the list of partners who have more than one unreconciled transaction
            cr.execute(
                "SELECT partner_id " \
                "FROM account_move_line " \
                "WHERE account_id=%s " \
                "AND reconcile_id IS NULL " \
                "AND state <> 'draft' " \
                "AND partner_id IS NOT NULL " \
                "GROUP BY partner_id " \
                "HAVING count(*)>1",
                (account_id.id,))
            partner_ids = [id for (id,) in cr.fetchall()]
            #filter?
            for partner_id in partner_ids:
                # get the list of unreconciled 'debit transactions' for this partner
                cr.execute(
                    "SELECT id, debit " \
                    "FROM account_move_line " \
                    "WHERE account_id=%s " \
                    "AND partner_id=%s " \
                    "AND reconcile_id IS NULL " \
                    "AND state <> 'draft' " \
                    "AND debit > 0 " \
                    "ORDER BY date_maturity",
                    (account_id.id, partner_id))
                debits = cr.fetchall()
                # get the list of unreconciled 'credit transactions' for this partner
                cr.execute(
                    "SELECT id, credit " \
                    "FROM account_move_line " \
                    "WHERE account_id=%s " \
                    "AND partner_id=%s " \
                    "AND reconcile_id IS NULL " \
                    "AND state <> 'draft' " \
                    "AND credit > 0 " \
                    "ORDER BY date_maturity",
                    (account_id.id, partner_id))
                credits = cr.fetchall()
                (rec, unrec) = self.do_reconcile(cr, uid, credits, debits, max_amount, power, form.writeoff_acc_id.id, form.period_id.id, form.journal_id.id, context)
                reconciled += rec
                unreconciled += unrec
            # add the number of transactions for partners who have only one
            # unreconciled transactions to the unreconciled count
            partner_filter = partner_ids and 'AND partner_id not in (%s)' % ','.join(map(str, filter(None, partner_ids))) or ''
            cr.execute(
                "SELECT count(*) " \
                "FROM account_move_line " \
                "WHERE account_id=%s " \
                "AND reconcile_id IS NULL " \
                "AND state <> 'draft' " + partner_filter,
                (account_id.id,))
            additional_unrec = cr.fetchone()[0]
            unreconciled = unreconciled + additional_unrec
        # Pass the counters back through the context (read by _defaults).
        context = dict(context, reconciled=reconciled, unreconciled=unreconciled)
        model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','account_automatic_reconcile_view1')])
        resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'account.automatic.reconcile',
            'views': [(resource_id,'form')],
            'type': 'ir.actions.act_window',
            'target': 'new',
            'context': context,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
neopenx/Dragon | Dragon/python/dragon/vm/tensorflow/ops/array_ops.py | 1 | 1712 | # --------------------------------------------------------
# TensorFlow @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
# Public TensorFlow-compatible array-op wrappers exported by this module.
__all__ = [
    'expand_dims',
    'shape',
    'zeros',
    'ones',
    'placeholder',
    'concat',
    'transpose',
    'tile',
    'reshape'
]
import dragon.ops as ops
from dragon.core.tensor import Tensor
from dragon.vm.tensorflow.framework import dtypes
def expand_dims(input, axis=None, name=None, dim=None):
    """Insert a dimension at ``axis``; ``dim`` is a legacy alias for ``axis``."""
    if dim is not None and axis is not None:
        raise ValueError("cannot specify both 'axis' and 'dim'.")
    if dim is not None:
        axis = dim
    return ops.ExpandDims(input, axis=axis, name=name)
def shape(input, name=None, out_type=dtypes.float32):
    """Return the shape of a tensor.

    Fix: forward the caller-supplied ``name`` to the backend op; the
    original passed the literal ``name=None``, silently discarding it.

    NOTE(review): ``out_type`` is accepted for TF API compatibility but is
    not forwarded to ops.Shape, and its ``float32`` default differs from
    TensorFlow's ``int32`` -- confirm backend expectations before changing.
    """
    return ops.Shape(input, name=name)
def zeros(shape, dtype=dtypes.float32, name=None):
    """Return a tensor of the given shape filled with 0.

    NOTE(review): ``dtype`` is accepted for TF API compatibility but is not
    forwarded to ops.Fill -- confirm whether the backend supports it.
    """
    return ops.Fill(shape, value=0.0, name=name)
def ones(shape, dtype=dtypes.float32, name=None):
    """Return a tensor of the given shape filled with 1.

    NOTE(review): ``dtype`` is accepted for TF API compatibility but is not
    forwarded to ops.Fill -- confirm whether the backend supports it.
    """
    return ops.Fill(shape, value=1.0, name=name)
def placeholder(dtype, shape=None, name=None):
    """Create a placeholder tensor to be fed with data at run time."""
    dtype_name = None
    if dtype is not None:
        # Only tf DType objects are accepted; keep the canonical name.
        if not isinstance(dtype, dtypes.DType):
            raise TypeError('The dtype should be a valid tf data type.')
        dtype_name = dtype.name
    return Tensor(name=name, shape=shape, dtype=dtype_name).Placeholder()
def concat(values, axis, name=None):
    """Concatenate a list of tensors along ``axis`` (wraps ops.Concat)."""
    return ops.Concat(values, axis=axis, name=name)
def transpose(a, perm=None, name=None):
    """Permute the dimensions of ``a`` according to ``perm`` (wraps ops.Transpose)."""
    return ops.Transpose(a, perm=perm, name=name)
def tile(input, multiples, name=None):
    """Tile ``input`` ``multiples`` times per dimension (wraps ops.Tile)."""
    return ops.Tile(input, multiples=multiples, name=name)
def reshape(tensor, shape, name=None):
    """Reshape a tensor.

    Fix: forward the caller-supplied ``name`` to the backend op; the
    original passed the literal ``name=None``, silently discarding it.
    """
    return ops.Reshape(tensor, shape=shape, name=name)
| bsd-2-clause |
kuza55/keras | examples/mnist_mlp.py | 7 | 1738 | '''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility; must run before model init
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils

# Training hyper-parameters.
batch_size = 128
nb_classes = 10
nb_epoch = 20

# Load MNIST (downloads on first use), flatten 28x28 images to 784-vectors
# and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# One-hot encode the integer class labels.
y_train_cat = np_utils.to_categorical(y_train, nb_classes)
y_test_cat = np_utils.to_categorical(y_test, nb_classes)

# Two ReLU hidden layers with dropout, softmax output over 10 digits.
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
history = model.fit(x_train, y_train_cat,
                    batch_size=batch_size, nb_epoch=nb_epoch,
                    verbose=1, validation_data=(x_test, y_test_cat))
score = model.evaluate(x_test, y_test_cat, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| mit |
tmeits/pybrain | pybrain/auxiliary/gaussprocess.py | 25 | 9240 | from __future__ import print_function
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de; Christian Osendorfer, osendorf@in.tum.de'
from scipy import r_, exp, zeros, eye, array, asarray, random, ravel, diag, sqrt, sin, cos, sort, mgrid, dot, floor
from scipy import c_ #@UnusedImport
from scipy.linalg import solve, inv
from pybrain.datasets import SupervisedDataSet
from scipy.linalg import norm
class GaussianProcess:
    """ This class represents a basic n-dimensional Gaussian Process. The implementation
        follows the book 'Gaussian Processes for Machine Learning' by Carl E. Rasmussen
        (an online version is available at: http://www.gaussianprocess.org/gpml/chapters/).
        The hyper parameters of the GP can be adjusted by setting the self.hyper variable,
        which must be a tuple of size 3: (length scale l, signal variance sigma_f,
        noise sigma_n).
    """

    def __init__(self, indim, start=0, stop=1, step=0.1):
        """ initializes the gaussian process object.

            :arg indim: input dimension
            :key start: start of interval for sampling the GP.
            :key stop: stop of interval for sampling the GP.
            :key step: stepsize for sampling interval.
            :note: start, stop, step can either be scalars or tuples of size 'indim'.
        """
        self.mean = 0
        self.start = start
        self.stop = stop
        self.step = step
        self.indim = indim
        # Training data: inputs are (n, indim); targets and per-sample noise
        # are flat arrays, all initially empty.
        self.trainx = zeros((0, indim), float)
        self.trainy = zeros((0), float)
        self.noise = zeros((0), float)
        # Regular grid of query points over [start, stop) with the given step.
        self.testx = self._buildGrid()
        self.calculated = True
        self.pred_mean = zeros(len(self.testx))
        self.pred_cov = eye(len(self.testx))
        self.autonoise = False
        # (length scale, signal std-dev, noise std-dev) of the RBF kernel.
        self.hyper = (0.5, 2.0, 0.1)

    def _kernel(self, a, b):
        """ kernel function, here RBF kernel """
        (l, sigma_f, _sigma_n) = self.hyper
        r = sigma_f ** 2 * exp(-1.0 / (2 * l ** 2) * norm(a - b, 2) ** 2)
        # if a == b:
        #    r += sigma_n**2
        return r

    def _buildGrid(self):
        # Build an 'indim'-dimensional mgrid slice expression as a string and
        # eval it, flattening each axis into a column of the result.
        # NOTE(review): on Python 3 'c_[map(...)]' would receive a map
        # iterator, not a list -- confirm this module is only used on
        # Python 2 before porting.
        (start, stop, step) = (self.start, self.stop, self.step)
        """ returns a mgrid type of array for 'dim' dimensions """
        if isinstance(start, (int, float, complex)):
            dimstr = 'start:stop:step, '*self.indim
        else:
            assert len(start) == len(stop) == len(step)
            dimstr = ["start[%i]:stop[%i]:step[%i], " % (i, i, i) for i in range(len(start))]
            dimstr = ''.join(dimstr)
        return eval('c_[map(ravel, mgrid[' + dimstr + '])]').T

    def _buildCov(self, a, b):
        # Dense kernel (Gram) matrix between row-sets 'a' and 'b'.
        K = zeros((len(a), len(b)), float)
        for i in range(len(a)):
            for j in range(len(b)):
                K[i, j] = self._kernel(a[i, :], b[j, :])
        return K

    def reset(self):
        # Discard all training data and predictions; keep the test grid.
        self.trainx = zeros((0, self.indim), float)
        self.trainy = zeros((0), float)
        self.noise = zeros((0), float)
        self.pred_mean = zeros(len(self.testx))
        self.pred_cov = eye(len(self.testx))

    def trainOnDataset(self, dataset):
        """ takes a SequentialDataSet with indim input dimension and scalar target """
        assert (dataset.getDimension('input') == self.indim)
        assert (dataset.getDimension('target') == 1)
        # REPLACES any previously stored training data.
        self.trainx = dataset.getField('input')
        self.trainy = ravel(dataset.getField('target'))
        self.noise = array([0.001] * len(self.trainx))
        # print(self.trainx, self.trainy)
        self.calculated = False

    def addDataset(self, dataset):
        """ adds the points from the dataset to the training set """
        assert (dataset.getDimension('input') == self.indim)
        assert (dataset.getDimension('target') == 1)
        self.trainx = r_[self.trainx, dataset.getField('input')]
        self.trainy = r_[self.trainy, ravel(dataset.getField('target'))]
        self.noise = array([0.001] * len(self.trainx))
        self.calculated = False

    def addSample(self, train, target):
        # Append a single (input, target) pair with default noise 0.001.
        self.trainx = r_[self.trainx, asarray([train])]
        self.trainy = r_[self.trainy, asarray(target)]
        self.noise = r_[self.noise, array([0.001])]
        self.calculated = False

    def testOnArray(self, arr):
        # Replaces the query grid with 'arr' and returns the posterior mean.
        self.testx = arr
        self._calculate()
        return self.pred_mean

    def _calculate(self):
        # calculate only of necessary
        if len(self.trainx) == 0:
            return
        # build covariance matrices
        train_train = self._buildCov(self.trainx, self.trainx)
        train_test = self._buildCov(self.trainx, self.testx)
        test_train = train_test.T
        test_test = self._buildCov(self.testx, self.testx)
        # calculate predictive mean and covariance
        K = train_train + self.noise * eye(len(self.trainx))
        if self.autonoise:
            # calculate average neighboring distance for auto-noise
            avgdist = 0
            sort_trainx = sort(self.trainx)
            for i, d in enumerate(sort_trainx):
                if i == 0:
                    continue
                avgdist += d - sort_trainx[i - 1]
            avgdist /= len(sort_trainx) - 1
            # sort(self.trainx)
            # add auto-noise from neighbouring samples (not standard gp)
            for i in range(len(self.trainx)):
                for j in range(len(self.trainx)):
                    if norm(self.trainx[i] - self.trainx[j]) > avgdist:
                        continue
                    d = norm(self.trainy[i] - self.trainy[j]) / (exp(norm(self.trainx[i] - self.trainx[j])))
                    K[i, i] += d
        # Standard GP posterior: mean + K_*^T K^-1 (y - mean),
        # cov = K_** - K_*^T K^-1 K_*.
        self.pred_mean = self.mean + dot(test_train, solve(K, self.trainy - self.mean, sym_pos=0))
        self.pred_cov = test_test - dot(test_train, dot(inv(K), train_test))
        self.calculated = True

    def draw(self):
        # Draw one random function sample from the posterior on the grid.
        if not self.calculated:
            self._calculate()
        return self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov)

    def plotCurves(self, showSamples=False, force2D=True):
        # NOTE(review): pylab.hold() was removed in matplotlib >= 3.0 --
        # confirm the supported matplotlib version before relying on this.
        from pylab import clf, hold, plot, fill, title, gcf, pcolor, gray
        if not self.calculated:
            self._calculate()
        if self.indim == 1:
            clf()
            hold(True)
            if showSamples:
                # plot samples (gray)
                for _ in range(5):
                    plot(self.testx, self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov), color='gray')
            # plot training set
            plot(self.trainx, self.trainy, 'bx')
            # plot mean (blue)
            plot(self.testx, self.pred_mean, 'b', linewidth=1)
            # plot variance (as "polygon" going from left to right for upper half and back for lower half)
            fillx = r_[ravel(self.testx), ravel(self.testx[::-1])]
            filly = r_[self.pred_mean + 2 * diag(self.pred_cov), self.pred_mean[::-1] - 2 * diag(self.pred_cov)[::-1]]
            fill(fillx, filly, facecolor='gray', edgecolor='white', alpha=0.3)
            title('1D Gaussian Process with mean and variance')
        elif self.indim == 2 and not force2D:
            from matplotlib import axes3d as a3
            fig = gcf()
            fig.clear()
            ax = a3.Axes3D(fig) #@UndefinedVariable
            # plot training set
            ax.plot3D(ravel(self.trainx[:, 0]), ravel(self.trainx[:, 1]), ravel(self.trainy), 'ro')
            # plot mean
            (x, y, z) = [m.reshape(sqrt(len(m)), sqrt(len(m))) for m in (self.testx[:, 0], self.testx[:, 1], self.pred_mean)]
            ax.plot_wireframe(x, y, z, colors='gray')
            return ax
        elif self.indim == 2 and force2D:
            # plot mean on pcolor map
            gray()
            # (x, y, z) = map(lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:,0], self.testx[:,1], self.pred_mean))
            m = floor(sqrt(len(self.pred_mean)))
            pcolor(self.pred_mean.reshape(m, m)[::-1, :])
        else: print("plotting only supported for indim=1 or indim=2.")
if __name__ == '__main__':
    # Demo script: exercises the GaussianProcess in 1D and then in 2D.
    from pylab import figure, show
    # --- example on how to use the GP in 1 dimension
    ds = SupervisedDataSet(1, 1)
    gp = GaussianProcess(indim=1, start= -3, stop=3, step=0.05)
    figure()
    x = mgrid[-3:3:0.2]
    y = 0.1 * x ** 2 + x + 1
    # NOTE(review): z is computed but never used in the 1D example; the two
    # training samples below are added by hand instead.
    z = sin(x) + 0.5 * cos(y)
    ds.addSample(-2.5, -1)
    ds.addSample(-1.0, 3)
    gp.mean = 0
    # new feature "autonoise" adds uncertainty to data depending on
    # it's distance to other points in the dataset. not tested much yet.
    # gp.autonoise = True
    gp.trainOnDataset(ds)
    gp.plotCurves(showSamples=True)
    # you can also test the gp on single points, but this deletes the
    # original testing grid. it can be restored with a call to _buildGrid()
    print((gp.testOnArray(array([[0.4]]))))
    # --- example on how to use the GP in 2 dimensions
    ds = SupervisedDataSet(2, 1)
    gp = GaussianProcess(indim=2, start=0, stop=5, step=0.2)
    figure()
    x, y = mgrid[0:5:4j, 0:5:4j]
    z = cos(x) * sin(y)
    # Flatten the grid and feed every (x, y) -> z point as a training sample.
    (x, y, z) = list(map(ravel, [x, y, z]))
    for i, j, k in zip(x, y, z):
        ds.addSample([i, j], [k])
    gp.trainOnDataset(ds)
    gp.plotCurves()
    show()
| bsd-3-clause |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Google/Spreadsheets/StructuredQuery.py | 4 | 6133 | # -*- coding: utf-8 -*-
###############################################################################
#
# StructuredQuery
# Retrieves a list-based feed containing data in your Google spreadsheet that meets a specified criteria.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class StructuredQuery(Choreography):
    """Choreo wrapper for the Google Spreadsheets StructuredQuery operation."""

    def __init__(self, temboo_session):
        """Bind this Choreo to *temboo_session*, a TembooSession holding valid credentials."""
        super(StructuredQuery, self).__init__(
            temboo_session, '/Library/Google/Spreadsheets/StructuredQuery')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return StructuredQueryInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution *result* in the matching result-set class."""
        return StructuredQueryResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap a running execution handle in the matching execution class."""
        return StructuredQueryChoreographyExecution(session, exec_id, path)
class StructuredQueryInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the StructuredQuery
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter stores one named Choreo input via the base class _set_input().

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new Access Token.)
        """
        super(StructuredQueryInputSet, self)._set_input('AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(StructuredQueryInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(StructuredQueryInputSet, self)._set_input('ClientSecret', value)

    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((optional, password) Deprecated (retained for backward compatibility only).)
        """
        super(StructuredQueryInputSet, self)._set_input('Password', value)

    def set_Query(self, value):
        """
        Set the value of the Query input for this Choreo. ((required, string) A valid structured query (i.e. id>4).)
        """
        super(StructuredQueryInputSet, self)._set_input('Query', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(StructuredQueryInputSet, self)._set_input('RefreshToken', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
        """
        super(StructuredQueryInputSet, self)._set_input('ResponseFormat', value)

    def set_SpreadsheetKey(self, value):
        """
        Set the value of the SpreadsheetKey input for this Choreo. ((required, string) The unique key of the spreadsheet associated with the feed you want to retrieve.)
        """
        super(StructuredQueryInputSet, self)._set_input('SpreadsheetKey', value)

    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(StructuredQueryInputSet, self)._set_input('Username', value)

    def set_WorksheetId(self, value):
        """
        Set the value of the WorksheetId input for this Choreo. ((required, string) The unique ID of the worksheet associated with the feed you want to retrieve.)
        """
        super(StructuredQueryInputSet, self)._set_input('WorksheetId', value)
class StructuredQueryResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the StructuredQuery Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): parameter name shadows the builtin ``str``; left
        # unchanged to preserve the public signature for keyword callers.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Google.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class StructuredQueryChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronously running StructuredQuery Choreo."""

    def _make_result_set(self, response, path):
        """Build the StructuredQuery-specific result set from *response*."""
        return StructuredQueryResultSet(response, path)
| gpl-3.0 |
tumbl3w33d/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_info.py | 25 | 3570 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_certificate_info
short_description: Gather information about DigitalOcean certificates
description:
- This module can be used to gather information about DigitalOcean provided certificates.
- This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
certificate_id:
description:
- Certificate ID that can be used to identify and reference a certificate.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all certificates
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
- name: Gather information about certificate with given id
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
- name: Get not after information about certificate
digital_ocean_certificate_info:
register: resp_out
- set_fact:
not_after_date: "{{ item.not_after }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='web-cert-01']"
- debug: var=not_after_date
'''
RETURN = '''
data:
description: DigitalOcean certificate information
returned: success
type: list
sample: [
{
"id": "892071a0-bb95-49bc-8021-3afd67a210bf",
"name": "web-cert-01",
"not_after": "2017-02-22T00:23:00Z",
"sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
"created_at": "2017-02-08T16:02:37Z"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
    """Fetch certificate data from DigitalOcean and exit the module with it.

    When ``certificate_id`` is supplied, a single certificate is fetched and a
    non-200 status fails the module; otherwise all certificates are retrieved
    via paginated requests (error handling delegated to the helper).
    """
    certificate_id = module.params.get('certificate_id', None)
    rest = DigitalOceanHelper(module)
    base_url = 'certificates?'
    if certificate_id is not None:
        # NOTE(review): this produces "certificates?/<id>" — the '?' before
        # the slash looks suspicious; confirm against the DigitalOcean API.
        response = rest.get("%s/%s" % (base_url, certificate_id))
        status_code = response.status_code
        if status_code != 200:
            module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
        resp_json = response.json
        certificate = resp_json['certificate']
    else:
        certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
    module.exit_json(changed=False, data=certificate)
def main():
    """Module entry point: build the argument spec, run the lookup, report errors."""
    spec = DigitalOceanHelper.digital_ocean_argument_spec()
    spec.update(certificate_id=dict(type='str', required=False))
    module = AnsibleModule(argument_spec=spec)
    # Warn users still invoking the module under its pre-2.9 "facts" name.
    if module._name == 'digital_ocean_certificate_facts':
        module.deprecate("The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", version='2.13')
    try:
        core(module)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=format_exc())


if __name__ == '__main__':
    main()
| gpl-3.0 |
jakubroztocil/httpie | tests/test_auth_plugins.py | 2 | 3696 | from mock import mock
from httpie.cli.constants import SEPARATOR_CREDENTIALS
from httpie.plugins import AuthPlugin
from httpie.plugins.registry import plugin_manager
from utils import http, HTTP_OK
# TODO: run all these tests in session mode as well
# Shared fixtures for the auth-plugin tests below.
USERNAME = 'user'
PASSWORD = 'password'
# Basic auth encoded `USERNAME` and `PASSWORD`
# noinspection SpellCheckingInspection
BASIC_AUTH_HEADER_VALUE = 'Basic dXNlcjpwYXNzd29yZA=='
# httpbin endpoint that challenges with HTTP Basic auth for these credentials.
BASIC_AUTH_URL = '/basic-auth/{0}/{1}'.format(USERNAME, PASSWORD)
# Expected JSON body returned by httpbin on successful authentication.
AUTH_OK = {'authenticated': True, 'user': USERNAME}
def basic_auth(header=BASIC_AUTH_HEADER_VALUE):
    """Return a requests-style auth callable that sets the Authorization header."""
    def attach(r):
        r.headers['Authorization'] = header
        return r
    return attach
def test_auth_plugin_parse_auth_false(httpbin):
    """With auth_parse=False the raw --auth value must reach the plugin unsplit."""
    class Plugin(AuthPlugin):
        auth_type = 'test-parse-false'
        auth_parse = False

        def get_auth(self, username=None, password=None):
            # No credential parsing: username/password stay None, raw value kept.
            assert username is None
            assert password is None
            assert self.raw_auth == BASIC_AUTH_HEADER_VALUE
            return basic_auth(self.raw_auth)

    plugin_manager.register(Plugin)
    try:
        r = http(
            httpbin + BASIC_AUTH_URL,
            '--auth-type',
            Plugin.auth_type,
            '--auth',
            BASIC_AUTH_HEADER_VALUE,
        )
        assert HTTP_OK in r
        assert r.json == AUTH_OK
    finally:
        # Always unregister so other tests see a clean plugin registry.
        plugin_manager.unregister(Plugin)
def test_auth_plugin_require_auth_false(httpbin):
    """With auth_require=False the plugin must work without any --auth argument."""
    class Plugin(AuthPlugin):
        auth_type = 'test-require-false'
        auth_require = False

        def get_auth(self, username=None, password=None):
            # No --auth supplied, so everything arrives as None.
            assert self.raw_auth is None
            assert username is None
            assert password is None
            return basic_auth()

    plugin_manager.register(Plugin)
    try:
        r = http(
            httpbin + BASIC_AUTH_URL,
            '--auth-type',
            Plugin.auth_type,
        )
        assert HTTP_OK in r
        assert r.json == AUTH_OK
    finally:
        plugin_manager.unregister(Plugin)
def test_auth_plugin_require_auth_false_and_auth_provided(httpbin):
    """auth_require=False must still parse credentials when --auth IS provided."""
    class Plugin(AuthPlugin):
        auth_type = 'test-require-false-yet-provided'
        auth_require = False

        def get_auth(self, username=None, password=None):
            # Credentials were supplied, so they are parsed despite auth_require=False.
            assert self.raw_auth == USERNAME + SEPARATOR_CREDENTIALS + PASSWORD
            assert username == USERNAME
            assert password == PASSWORD
            return basic_auth()

    plugin_manager.register(Plugin)
    try:
        r = http(
            httpbin + BASIC_AUTH_URL,
            '--auth-type',
            Plugin.auth_type,
            '--auth',
            USERNAME + SEPARATOR_CREDENTIALS + PASSWORD,
        )
        assert HTTP_OK in r
        assert r.json == AUTH_OK
    finally:
        plugin_manager.unregister(Plugin)
# Patch the password prompt: if it were invoked, the sentinel value would
# propagate and break the assertions below.
@mock.patch('httpie.cli.argtypes.AuthCredentials._getpass',
            new=lambda self, prompt: 'UNEXPECTED_PROMPT_RESPONSE')
def test_auth_plugin_prompt_password_false(httpbin):
    """With prompt_password=False a username-only --auth must NOT trigger a prompt."""
    class Plugin(AuthPlugin):
        auth_type = 'test-prompt-false'
        prompt_password = False

        def get_auth(self, username=None, password=None):
            # Only the username was supplied; password stays None (no prompt).
            assert self.raw_auth == USERNAME
            assert username == USERNAME
            assert password is None
            return basic_auth()

    plugin_manager.register(Plugin)
    try:
        r = http(
            httpbin + BASIC_AUTH_URL,
            '--auth-type',
            Plugin.auth_type,
            '--auth',
            USERNAME,
        )
        assert HTTP_OK in r
        assert r.json == AUTH_OK
    finally:
        plugin_manager.unregister(Plugin)
| bsd-3-clause |
FlorentChamault/My_sickbeard | lib/hachoir_metadata/video.py | 90 | 15568 | from lib.hachoir_core.field import MissingField
from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_metadata.metadata_item import QUALITY_GOOD
from lib.hachoir_metadata.safe import fault_tolerant
from lib.hachoir_parser.video import MovFile, AsfFile, FlvFile
from lib.hachoir_parser.video.asf import Descriptor as ASF_Descriptor
from lib.hachoir_parser.container import MkvFile
from lib.hachoir_parser.container.mkv import dateToDatetime
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makeUnicode, makePrintable, timedelta2seconds
from datetime import timedelta
class MkvMetadata(MultipleMetadata):
    """Extract container, per-track and tag metadata from a Matroska (MKV) file."""

    # Mapping from Matroska SimpleTag names to hachoir metadata attribute keys.
    tag_key = {
        "TITLE": "title",
        "URL": "url",
        "COPYRIGHT": "copyright",
        # TODO: use maybe another name?
        # Its value may be different than (...)/Info/DateUTC/date
        "DATE_RECORDED": "creation_date",
        # TODO: Extract subtitle metadata
        "SUBTITLE": "subtitle_author",
    }

    def extract(self, mkv):
        # An MKV file is a sequence of Segments; process each one.
        for segment in mkv.array("Segment"):
            self.processSegment(segment)

    def processSegment(self, segment):
        # Dispatch on the segment's top-level element names.
        for field in segment:
            if field.name.startswith("Info["):
                self.processInfo(field)
            elif field.name.startswith("Tags["):
                for tag in field.array("Tag"):
                    self.processTag(tag)
            elif field.name.startswith("Tracks["):
                self.processTracks(field)
            elif field.name.startswith("Cluster["):
                # Clusters contain the actual A/V payload; stop early unless
                # a high-quality (exhaustive) extraction was requested.
                if self.quality < QUALITY_GOOD:
                    return

    def processTracks(self, tracks):
        for entry in tracks.array("TrackEntry"):
            self.processTrack(entry)

    def processTrack(self, track):
        # Route each TrackEntry to the handler matching its declared type.
        if "TrackType/enum" not in track:
            return
        if track["TrackType/enum"].display == "video":
            self.processVideo(track)
        elif track["TrackType/enum"].display == "audio":
            self.processAudio(track)
        elif track["TrackType/enum"].display == "subtitle":
            self.processSubtitle(track)

    def trackCommon(self, track, meta):
        # Title and language are shared by all track types.
        # "mis"/"und" are the ISO 639 codes for miscellaneous/undetermined.
        if "Name/unicode" in track:
            meta.title = track["Name/unicode"].value
        if "Language/string" in track \
                and track["Language/string"].value not in ("mis", "und"):
            meta.language = track["Language/string"].value

    def processVideo(self, track):
        video = Metadata(self)
        self.trackCommon(track, video)
        try:
            video.compression = track["CodecID/string"].value
            if "Video" in track:
                video.width = track["Video/PixelWidth/unsigned"].value
                video.height = track["Video/PixelHeight/unsigned"].value
        except MissingField:
            pass
        self.addGroup("video[]", video, "Video stream")

    def getDouble(self, field, parent):
        # Matroska stores floating values as either "float" or "double";
        # return whichever child exists, or None.
        float_key = '%s/float' % parent
        if float_key in field:
            return field[float_key].value
        double_key = '%s/double' % parent
        if double_key in field:
            return field[double_key].value
        return None

    def processAudio(self, track):
        audio = Metadata(self)
        self.trackCommon(track, audio)
        if "Audio" in track:
            frequency = self.getDouble(track, "Audio/SamplingFrequency")
            if frequency is not None:
                audio.sample_rate = frequency
            if "Audio/Channels/unsigned" in track:
                audio.nb_channel = track["Audio/Channels/unsigned"].value
            if "Audio/BitDepth/unsigned" in track:
                audio.bits_per_sample = track["Audio/BitDepth/unsigned"].value
        if "CodecID/string" in track:
            audio.compression = track["CodecID/string"].value
        self.addGroup("audio[]", audio, "Audio stream")

    def processSubtitle(self, track):
        sub = Metadata(self)
        self.trackCommon(track, sub)
        try:
            sub.compression = track["CodecID/string"].value
        except MissingField:
            pass
        self.addGroup("subtitle[]", sub, "Subtitle")

    def processTag(self, tag):
        for field in tag.array("SimpleTag"):
            self.processSimpleTag(field)

    def processSimpleTag(self, tag):
        # Copy a recognized SimpleTag into the matching metadata attribute.
        if "TagName/unicode" not in tag \
                or "TagString/unicode" not in tag:
            return
        name = tag["TagName/unicode"].value
        if name not in self.tag_key:
            return
        key = self.tag_key[name]
        value = tag["TagString/unicode"].value
        setattr(self, key, value)

    def processInfo(self, info):
        # Segment Info: duration, creation date, writing/muxing application, title.
        if "TimecodeScale/unsigned" in info:
            duration = self.getDouble(info, "Duration")
            if duration is not None:
                try:
                    # Duration is expressed in timecode-scale units (nanoseconds).
                    seconds = duration * info["TimecodeScale/unsigned"].value * 1e-9
                    self.duration = timedelta(seconds=seconds)
                except OverflowError:
                    # Catch OverflowError for timedelta (long int too large
                    # to be converted to an int)
                    pass
        if "DateUTC/date" in info:
            try:
                self.creation_date = dateToDatetime(info["DateUTC/date"].value)
            except OverflowError:
                pass
        if "WritingApp/unicode" in info:
            self.producer = info["WritingApp/unicode"].value
        if "MuxingApp/unicode" in info:
            self.producer = info["MuxingApp/unicode"].value
        if "Title/unicode" in info:
            self.title = info["Title/unicode"].value
class FlvMetadata(MultipleMetadata):
    """Extract audio/video stream info and AMF metadata from a Flash Video (FLV) file."""

    def extract(self, flv):
        if "video[0]" in flv:
            meta = Metadata(self)
            self.extractVideo(flv["video[0]"], meta)
            self.addGroup("video", meta, "Video stream")
        if "audio[0]" in flv:
            meta = Metadata(self)
            self.extractAudio(flv["audio[0]"], meta)
            self.addGroup("audio", meta, "Audio stream")
        # TODO: Computer duration
        # One technic: use last video/audio chunk and use timestamp
        # But this is very slow
        self.format_version = flv.description
        if "metadata/entry[1]" in flv:
            self.extractAMF(flv["metadata/entry[1]"])
        if self.has('duration'):
            # Average bit rate = file size / duration (only if AMF gave a duration).
            self.bit_rate = flv.size / timedelta2seconds(self.get('duration'))

    @fault_tolerant
    def extractAudio(self, audio, meta):
        # Prefer the MP3 frame description when available; the codec enum otherwise.
        if audio["codec"].display == "MP3" and "music_data" in audio:
            meta.compression = audio["music_data"].description
        else:
            meta.compression = audio["codec"].display
        meta.sample_rate = audio.getSampleRate()
        if audio["is_16bit"].value:
            meta.bits_per_sample = 16
        else:
            meta.bits_per_sample = 8
        if audio["is_stereo"].value:
            meta.nb_channel = 2
        else:
            meta.nb_channel = 1

    @fault_tolerant
    def extractVideo(self, video, meta):
        meta.compression = video["codec"].display

    def extractAMF(self, amf):
        # The AMF metadata object is an array of key/value items.
        for entry in amf.array("item"):
            self.useAmfEntry(entry)

    @fault_tolerant
    def useAmfEntry(self, entry):
        # Map well-known AMF keys onto metadata attributes.
        key = entry["key"].value
        if key == "duration":
            self.duration = timedelta(seconds=entry["value"].value)
        elif key == "creator":
            self.producer = entry["value"].value
        elif key == "audiosamplerate":
            self.sample_rate = entry["value"].value
        elif key == "framerate":
            self.frame_rate = entry["value"].value
        elif key == "metadatacreator":
            self.producer = entry["value"].value
        elif key == "metadatadate":
            # NOTE(review): reads entry.value (not entry["value"].value) —
            # presumably intentional for date entries; confirm against parser.
            self.creation_date = entry.value
        elif key == "width":
            self.width = int(entry["value"].value)
        elif key == "height":
            self.height = int(entry["value"].value)
class MovMetadata(RootMetadata):
    """Extract movie/track header metadata from a QuickTime (MOV) file."""

    def extract(self, mov):
        for atom in mov:
            if "movie" in atom:
                self.processMovie(atom["movie"])

    @fault_tolerant
    def processMovieHeader(self, hdr):
        self.creation_date = hdr["creation_date"].value
        self.last_modification = hdr["lastmod_date"].value
        # Duration is stored in time-scale units; convert to seconds.
        self.duration = timedelta(seconds=float(hdr["duration"].value) / hdr["time_scale"].value)
        # NOTE(review): assigning self.comment twice relies on hachoir's
        # Metadata accumulating multiple values per key — confirm; otherwise
        # the play-speed comment would be lost.
        self.comment = _("Play speed: %.1f%%") % (hdr["play_speed"].value*100)
        self.comment = _("User volume: %.1f%%") % (float(hdr["volume"].value)*100//255)

    @fault_tolerant
    def processTrackHeader(self, hdr):
        # Only record dimensions when both are non-zero (audio tracks have 0x0).
        width = int(hdr["frame_size_width"].value)
        height = int(hdr["frame_size_height"].value)
        if width and height:
            self.width = width
            self.height = height

    def processTrack(self, atom):
        for field in atom:
            if "track_hdr" in field:
                self.processTrackHeader(field["track_hdr"])

    def processMovie(self, atom):
        for field in atom:
            if "track" in field:
                self.processTrack(field["track"])
            if "movie_hdr" in field:
                self.processMovieHeader(field["movie_hdr"])
class AsfMetadata(MultipleMetadata):
    """Extract metadata from an ASF/WMV/WMA container header."""

    # Extended-descriptor names mapped onto standard metadata attributes.
    EXT_DESC_TO_ATTR = {
        "Encoder": "producer",
        "ToolName": "producer",
        "AlbumTitle": "album",
        "Track": "track_number",
        "TrackNumber": "track_total",
        "Year": "creation_date",
        "AlbumArtist": "author",
    }
    SKIP_EXT_DESC = set((
        # Useless informations
        "WMFSDKNeeded", "WMFSDKVersion",
        "Buffer Average", "VBR Peak", "EncodingTime",
        "MediaPrimaryClassID", "UniqueFileIdentifier",
    ))

    def extract(self, asf):
        if "header/content" in asf:
            self.processHeader(asf["header/content"])

    def processHeader(self, header):
        # NOTE(review): `compression` is collected from the codec list but
        # never consumed here — see the TODO in streamProperty().
        compression = []
        is_vbr = None
        if "ext_desc/content" in header:
            # Extract all data from ext_desc
            data = {}
            for desc in header.array("ext_desc/content/descriptor"):
                self.useExtDescItem(desc, data)
            # Have ToolName and ToolVersion? If yes, group them to producer key
            if "ToolName" in data and "ToolVersion" in data:
                self.producer = "%s (version %s)" % (data["ToolName"], data["ToolVersion"])
                del data["ToolName"]
                del data["ToolVersion"]
            # "IsVBR" key
            if "IsVBR" in data:
                is_vbr = (data["IsVBR"] == 1)
                del data["IsVBR"]
            # Store data: known keys map to attributes, unknown ones become
            # "key=value" comment entries.
            for key, value in data.iteritems():
                if key in self.EXT_DESC_TO_ATTR:
                    key = self.EXT_DESC_TO_ATTR[key]
                else:
                    if isinstance(key, str):
                        key = makePrintable(key, "ISO-8859-1", to_unicode=True)
                    value = "%s=%s" % (key, value)
                    key = "comment"
                setattr(self, key, value)
        if "file_prop/content" in header:
            self.useFileProp(header["file_prop/content"], is_vbr)
        if "codec_list/content" in header:
            for codec in header.array("codec_list/content/codec"):
                if "name" in codec:
                    text = codec["name"].value
                    if "desc" in codec and codec["desc"].value:
                        text = "%s (%s)" % (text, codec["desc"].value)
                    compression.append(text)
        # Build one metadata group per audio/video stream property object.
        audio_index = 1
        video_index = 1
        for index, stream_prop in enumerate(header.array("stream_prop")):
            if "content/audio_header" in stream_prop:
                meta = Metadata(self)
                self.streamProperty(header, index, meta)
                self.streamAudioHeader(stream_prop["content/audio_header"], meta)
                if self.addGroup("audio[%u]" % audio_index, meta, "Audio stream #%u" % audio_index):
                    audio_index += 1
            elif "content/video_header" in stream_prop:
                meta = Metadata(self)
                self.streamProperty(header, index, meta)
                self.streamVideoHeader(stream_prop["content/video_header"], meta)
                if self.addGroup("video[%u]" % video_index, meta, "Video stream #%u" % video_index):
                    video_index += 1
        if "metadata/content" in header:
            info = header["metadata/content"]
            try:
                self.title = info["title"].value
                self.author = info["author"].value
                self.copyright = info["copyright"].value
            except MissingField:
                pass

    @fault_tolerant
    def streamAudioHeader(self, audio, meta):
        if not meta.has("compression"):
            meta.compression = audio["twocc"].display
        meta.nb_channel = audio["channels"].value
        meta.sample_rate = audio["sample_rate"].value
        meta.bits_per_sample = audio["bits_per_sample"].value

    @fault_tolerant
    def streamVideoHeader(self, video, meta):
        meta.width = video["width"].value
        meta.height = video["height"].value
        if "bmp_info" in video:
            bmp_info = video["bmp_info"]
            if not meta.has("compression"):
                meta.compression = bmp_info["codec"].display
            meta.bits_per_pixel = bmp_info["bpp"].value

    @fault_tolerant
    def useExtDescItem(self, desc, data):
        if desc["type"].value == ASF_Descriptor.TYPE_BYTE_ARRAY:
            # Skip binary data
            return
        key = desc["name"].value
        if "/" in key:
            # Replace "WM/ToolName" with "ToolName"
            key = key.split("/", 1)[1]
        if key in self.SKIP_EXT_DESC:
            # Skip some keys
            return
        value = desc["value"].value
        if not value:
            return
        value = makeUnicode(value)
        data[key] = value

    @fault_tolerant
    def useFileProp(self, prop, is_vbr):
        self.creation_date = prop["creation_date"].value
        self.duration = prop["play_duration"].value
        if prop["seekable"].value:
            self.comment = u"Is seekable"
        # Annotate the max bit rate with the VBR/CBR mode when known.
        value = prop["max_bitrate"].value
        text = prop["max_bitrate"].display
        if is_vbr is True:
            text = "VBR (%s max)" % text
        elif is_vbr is False:
            text = "%s (CBR)" % text
        else:
            text = "%s (max)" % text
        self.bit_rate = (value, text)

    def streamProperty(self, header, index, meta):
        key = "bit_rates/content/bit_rate[%u]/avg_bitrate" % index
        if key in header:
            meta.bit_rate = header[key].value
        # TODO: Use codec list
        # It doesn't work when the video uses /header/content/bitrate_mutex
        # since the codec list are shared between streams but... how is it
        # shared?
        # key = "codec_list/content/codec[%u]" % index
        # if key in header:
        #     codec = header[key]
        #     if "name" in codec:
        #         text = codec["name"].value
        #         if "desc" in codec and codec["desc"].value:
        #             meta.compression = "%s (%s)" % (text, codec["desc"].value)
        #         else:
        #             meta.compression = text
# Register each parser class with its metadata extractor so hachoir can map
# parsed files to the right metadata class.
registerExtractor(MovFile, MovMetadata)
registerExtractor(AsfFile, AsfMetadata)
registerExtractor(FlvFile, FlvMetadata)
registerExtractor(MkvFile, MkvMetadata)
| gpl-3.0 |
santoshn/softboundcets-34 | softboundcets-llvm-clang34/tools/clang/docs/analyzer/conf.py | 6 | 8021 | # -*- coding: utf-8 -*-
#
# Clang Static Analyzer documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 2 15:54:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# -- General configuration ------------------------------------------------

# Sphinx extensions used by the Clang Static Analyzer documentation.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']

# Relative paths searched for page templates.
templates_path = ['_templates']

# Sources are reStructuredText files, rooted at index.rst.
source_suffix = '.rst'
master_doc = 'index'

# Project identity.
project = u'Clang Static Analyzer'
copyright = u'2013, Analyzer Team'

# The short X.Y version and the full release string.
version = '3.4'
release = '3.4'

# Patterns ignored when looking for source files.
exclude_patterns = ['_build']

# Pygments syntax-highlighting style.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

html_theme = 'haiku'
html_static_path = []

# Output base name for the HTML help builder.
htmlhelp_basename = 'ClangStaticAnalyzerdoc'

# -- Options for LaTeX output ---------------------------------------------

# No LaTeX customizations (paper size, point size, preamble) are overridden.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'ClangStaticAnalyzer.tex', u'Clang Static Analyzer Documentation',
     u'Analyzer Team', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'clangstaticanalyzer', u'Clang Static Analyzer Documentation',
     [u'Analyzer Team'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'ClangStaticAnalyzer', u'Clang Static Analyzer Documentation',
     u'Analyzer Team', 'ClangStaticAnalyzer', 'One line description of project.',
     'Miscellaneous'),
]

# Cross-reference the Python standard library documentation.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-3-clause |
vvw/gensim | gensim/summarization/keywords.py | 32 | 7295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
from gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word
from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
from gensim.summarization.commons import build_graph as _build_graph
from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
from itertools import combinations as _combinations
from six.moves.queue import Queue as _Queue
from six.moves import xrange
WINDOW_SIZE = 2
"""Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters
Example: filter for nouns and adjectives:
INCLUDING_FILTER = ['NN', 'JJ']"""
INCLUDING_FILTER = ['NN', 'JJ']
EXCLUDING_FILTER = []
def _get_pos_filters():
    """Return the module-level POS include/exclude filters as frozensets."""
    return (frozenset(INCLUDING_FILTER),
            frozenset(EXCLUDING_FILTER))
def _get_words_for_graph(tokens):
    """Select the lemmas that may become graph nodes.

    :param tokens: dict of word -> syntactic unit exposing ``tag`` (POS tag)
        and ``token`` (lemma) attributes
    :return: list of lemmas passing the POS include/exclude filters
    :raises ValueError: if both include and exclude filters are configured
    """
    include_filters, exclude_filters = _get_pos_filters()
    if include_filters and exclude_filters:
        raise ValueError("Can't use both include and exclude filters, should use only one")
    result = []
    # .items() instead of the Python-2-only .iteritems(): the module already
    # targets Python 3 through its six.moves imports.
    for word, unit in tokens.items():
        if exclude_filters and unit.tag in exclude_filters:
            continue
        # Keep the lemma when it matches the include filter, when no include
        # filter is configured, or when the unit carries no POS tag at all.
        if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:
            result.append(unit.token)
    return result
def _get_first_window(split_text):
    # The initial co-occurrence window: the first WINDOW_SIZE raw words.
    return split_text[:WINDOW_SIZE]
def _set_graph_edge(graph, tokens, word_a, word_b):
    """Add an edge between the lemmas of two co-occurring words.

    Both words must be known tokens and both lemmas must already be graph
    nodes; duplicate edges are skipped.
    """
    if word_a in tokens and word_b in tokens:
        lemma_a = tokens[word_a].token
        lemma_b = tokens[word_b].token
        edge = (lemma_a, lemma_b)
        if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):
            graph.add_edge(edge)
def _process_first_window(graph, tokens, split_text):
    # Connect every pair of words inside the initial window.
    first_window = _get_first_window(split_text)
    for word_a, word_b in _combinations(first_window, 2):
        _set_graph_edge(graph, tokens, word_a, word_b)
def _init_queue(split_text):
    """Seed the sliding-window queue with the first window minus its head."""
    queue = _Queue()
    first_window = _get_first_window(split_text)
    for word in first_window[1:]:
        queue.put(word)
    return queue
def _process_word(graph, tokens, queue, word):
    # Link the incoming word to every word currently inside the window.
    for word_to_compare in _queue_iterator(queue):
        _set_graph_edge(graph, tokens, word, word_to_compare)
def _update_queue(queue, word):
    # Slide the window one position: drop the oldest word, append the newest.
    queue.get()
    queue.put(word)
    # Invariant: the queue always holds exactly WINDOW_SIZE - 1 words.
    assert queue.qsize() == (WINDOW_SIZE - 1)
def _process_text(graph, tokens, split_text):
    """Slide a WINDOW_SIZE window over the text, adding co-occurrence edges."""
    queue = _init_queue(split_text)
    for i in xrange(WINDOW_SIZE, len(split_text)):
        word = split_text[i]
        _process_word(graph, tokens, queue, word)
        _update_queue(queue, word)
def _queue_iterator(queue):
    # Yield each queued item once, re-inserting it so that after a full pass
    # the queue holds the same items (rotated back to original order).
    iterations = queue.qsize()
    for i in xrange(iterations):
        var = queue.get()
        yield var
        queue.put(var)
def _set_graph_edges(graph, tokens, split_text):
    # Edge construction: initial window first, then the sliding window.
    _process_first_window(graph, tokens, split_text)
    _process_text(graph, tokens, split_text)
def _extract_tokens(lemmas, scores, ratio, words):
lemmas.sort(key=lambda s: scores[s], reverse=True)
# If no "words" option is selected, the number of sentences is
# reduced by the provided ratio, else, the ratio is ignored.
length = len(lemmas) * ratio if words is None else words
return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]
def _lemmas_to_words(tokens):
lemma_to_word = {}
for word, unit in tokens.iteritems():
lemma = unit.token
if lemma in lemma_to_word:
lemma_to_word[lemma].append(word)
else:
lemma_to_word[lemma] = [word]
return lemma_to_word
def _get_keywords_with_score(extracted_lemmas, lemma_to_word):
"""
:param extracted_lemmas:list of tuples
:param lemma_to_word: dict of {lemma:list of words}
:return: dict of {keyword:score}
"""
keywords = {}
for score, lemma in extracted_lemmas:
keyword_list = lemma_to_word[lemma]
for keyword in keyword_list:
keywords[keyword] = score
return keywords
def _strip_word(word):
    # Normalise a raw text chunk to its first tokenized word ("" if the
    # tokenizer yields nothing, e.g. for pure punctuation).
    stripped_word_list = list(_tokenize_by_word(word))
    return stripped_word_list[0] if stripped_word_list else ""
def _get_combined_keywords(_keywords, split_text):
    """Merge adjacent keywords in the original text into multi-word phrases.

    :param _keywords: dict of {keyword: score}
    :param split_text: list of strings (raw whitespace-split text)
    :return: list of combined keyword phrases
    """
    result = []
    # Work on a copy: keywords are popped once consumed by a phrase.
    _keywords = _keywords.copy()
    len_text = len(split_text)
    for i in xrange(len_text):
        word = _strip_word(split_text[i])
        if word in _keywords:
            combined_word = [word]
            if i + 1 == len_text:
                result.append(word)   # appends last word if keyword and doesn't iterate
            # Extend the phrase while consecutive words are also keywords.
            for j in xrange(i + 1, len_text):
                other_word = _strip_word(split_text[j])
                # NOTE(review): str.decode only exists on Python 2 byte
                # strings; on Python 3 this line raises AttributeError --
                # confirm the intended Python version before reuse.
                if other_word in _keywords and other_word == split_text[j].decode("utf-8"):
                    combined_word.append(other_word)
                else:
                    for keyword in combined_word:
                        _keywords.pop(keyword)
                    result.append(" ".join(combined_word))
                    break
    return result
def _get_average_score(concept, _keywords):
word_list = concept.split()
word_counter = 0
total = 0
for word in word_list:
total += _keywords[word]
word_counter += 1
return total / word_counter
def _format_results(_keywords, combined_keywords, split, scores):
    """Order the combined keywords by average score and format the output.

    :param _keywords: dict of {keyword: score}
    :param combined_keywords: list of keyword phrases (sorted in place)
    :param split: when True (and ``scores`` is False) return a list
    :param scores: when True return (phrase, average score) pairs
    :return: list of pairs, list of phrases, or a newline-joined string
    """
    average = lambda phrase: _get_average_score(phrase, _keywords)
    combined_keywords.sort(key=average, reverse=True)
    if scores:
        return [(phrase, average(phrase)) for phrase in combined_keywords]
    if split:
        return combined_keywords
    return "\n".join(combined_keywords)
def keywords(text, ratio=0.2, words=None, split=False, scores=False):
    """Extract keywords from ``text`` with TextRank over a co-occurrence graph.

    :param text: input document
    :param ratio: fraction of lemmas to keep when ``words`` is None
    :param words: exact number of lemmas to keep (overrides ``ratio``)
    :param split: return a list instead of a newline-joined string
    :param scores: return (keyword, score) pairs
    """
    # Gets a dict of word -> lemma
    tokens = _clean_text_by_word(text)
    split_text = list(_tokenize_by_word(text))
    # Creates the graph and adds the edges
    graph = _build_graph(_get_words_for_graph(tokens))
    _set_graph_edges(graph, tokens, split_text)
    del split_text # It's no longer used
    _remove_unreachable_nodes(graph)
    # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score
    pagerank_scores = _pagerank(graph)
    extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)
    lemmas_to_word = _lemmas_to_words(tokens)
    keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)
    # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined
    combined_keywords = _get_combined_keywords(keywords, text.split())
    return _format_results(keywords, combined_keywords, split, scores)
def get_graph(text):
    """Build and return the word co-occurrence graph for ``text``."""
    tokens = _clean_text_by_word(text)
    split_text = list(_tokenize_by_word(text))
    graph = _build_graph(_get_words_for_graph(tokens))
    _set_graph_edges(graph, tokens, split_text)
    return graph
| gpl-3.0 |
lmcro/webserver | qa/127-SCGI-Post.py | 8 | 1736 | import os
from base import *
DIR = "/SCGI2/"
MAGIC = "Cherokee and SCGI rocks!"
PORT = get_free_port()
PYTHON = look_for_python()
SCRIPT = """
from pyscgi import *
class TestHandler (SCGIHandler):
def handle_request (self):
self.handle_post()
self.send('Content-Type: text/plain\\r\\n\\r\\n')
self.send('Post: %%s\\n' %% (self.post))
SCGIServer(TestHandler, port=%d).serve_forever()
""" % (PORT)
source = get_next_source()
CONF = """
vserver!1!rule!1270!match = directory
vserver!1!rule!1270!match!directory = %(DIR)s
vserver!1!rule!1270!handler = scgi
vserver!1!rule!1270!handler!balancer = round_robin
vserver!1!rule!1270!handler!balancer!source!1 = %(source)d
source!%(source)d!type = interpreter
source!%(source)d!host = localhost:%(PORT)d
source!%(source)d!interpreter = %(PYTHON)s %(scgi_file)s
"""
class Test (TestBase):
    """QA test: POST through the SCGI handler must be echoed back."""
    def __init__ (self):
        TestBase.__init__ (self, __file__)
        self.name = "SCGI II: Post"
        # Raw HTTP/1.0 POST request with a urlencoded body of MAGIC.
        self.request = "POST %s HTTP/1.0\r\n" %(DIR) +\
                       "Content-type: application/x-www-form-urlencoded\r\n" +\
                       "Content-length: %d\r\n" % (len(MAGIC))
        self.post = MAGIC
        self.expected_error = 200
        self.expected_content = "Post: "+MAGIC
        # The SCGI script source must never leak into the response body.
        self.forbidden_content = ["pyscgi", "SCGIServer", "write"]
    def Prepare (self, www):
        # NOTE(review): 0444 is a Python-2-only octal literal (0o444 on
        # Python 3) -- this file targets Python 2.
        scgi_file = self.WriteFile (www, "scgi_test2.scgi", 0444, SCRIPT)
        pyscgi = os.path.join (www, 'pyscgi.py')
        if not os.path.exists (pyscgi):
            self.CopyFile ('pyscgi.py', pyscgi)
        # Fill the CONF template; scgi_file is only known at prepare time.
        vars = globals()
        vars['scgi_file'] = scgi_file
        self.conf = CONF % (vars)
| gpl-2.0 |
brianrodri/oppia | core/domain/auth_domain.py | 2 | 8845 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for authentication."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import python_utils
import utils
# Auth ID refers to an identifier that links many Identity Providers to a single
# user. For example, an individual user's Facebook, Google, and Apple profiles
# would all map to a single Auth ID.
#
# Auth IDs are handled by the sub-modules in `core.platform.auth`.
#
# This domain object is simply a convenience for pairing Auth IDs to their
# corresponding Oppia-generated IDs in our APIs.
# Convenience pairing of a provider-issued auth ID with its Oppia user ID.
AuthIdUserIdPair = (
    collections.namedtuple('AuthIdUserIdPair', ['auth_id', 'user_id']))
class InvalidAuthSessionError(Exception):
    """Error raised when an invalid auth session is detected."""
    pass
class StaleAuthSessionError(Exception):
    """Error raised when an auth session needs to be refreshed."""
    pass
class AuthClaims(python_utils.OBJECT):
    """Domain object for holding onto essential Claims about an authorized user.

    A Claim is a piece of information about a user (e.g. name, mailing address,
    phone number).

    Attributes:
        auth_id: str. A unique identifier provided by an identity provider that
            is associated with the user. The ID is only unique with respect to
            the identity provider that produced it.
        email: str|None. The email address associated with the user, if any.
        role_is_super_admin: bool. Whether the user has super admin privileges.
    """
    def __init__(self, auth_id, email, role_is_super_admin):
        if not auth_id:
            raise Exception('auth_id must not be empty')
        self.auth_id = auth_id
        self.email = email
        self.role_is_super_admin = role_is_super_admin
    def __repr__(self):
        return 'AuthClaims(auth_id=%r, email=%r, role_is_super_admin=%r)' % (
            self.auth_id, self.email, self.role_is_super_admin)
    def __hash__(self):
        # Hash and equality are defined over the same attribute triple, so
        # equal instances hash equally (required for dict/set membership).
        return hash((self.auth_id, self.email, self.role_is_super_admin))
    def __eq__(self, other):
        # Returning NotImplemented (not raising) lets Python fall back to the
        # other operand's comparison for non-AuthClaims values.
        # https://docs.python.org/2/library/constants.html#NotImplemented.
        return NotImplemented if not isinstance(other, AuthClaims) else (
            (self.auth_id, self.email, self.role_is_super_admin) ==
            (other.auth_id, other.email, other.role_is_super_admin))
    def __ne__(self, other):
        # TODO(#11474): Delete this method once we've moved to Python 3 and rely
        # on auto-generated method. In Python 2, we need to write this method
        # ourselves: https://stackoverflow.com/a/30676267/4859885.
        return not self == other
class UserAuthDetails(python_utils.OBJECT):
    """Domain object representing a user's authentication details.

    There are two distinct types of user accounts: "full" and "profile".

    full: An account that is directly associated with an identity provider.
        The provider's auth_id value will be kept in its corresponding
        property (e.g. gae_id for Google AppEngine authentication and
        firebase_auth_id for Firebase authentication).
    profile: An account that depends on its parent user for authentication.
        These accounts are not directly associated with an identity
        provider.

    The distinction between profile and full user accounts are enforced through
    invariants on the properties: auth_id and parent_user_id (where auth_id is:
    gae_id or firebase_auth_id).

    Specifically: (parent_user_id is not None) if and only if (auth_id is None).
    """
    def __init__(
            self, user_id, gae_id, firebase_auth_id, parent_user_id,
            deleted=False):
        # See class docstring for the full-vs-profile invariant on these
        # attributes; validate() is the authority that enforces it.
        self.user_id = user_id
        self.gae_id = gae_id
        self.firebase_auth_id = firebase_auth_id
        self.parent_user_id = parent_user_id
        # Marks the account as pending/completed deletion (wipeout).
        self.deleted = deleted
    def __repr__(self):
        return (
            'UserAuthDetails(user_id=%r, gae_id=%r, firebase_auth_id=%r, '
            'parent_user_id=%r, deleted=%r)' % (
                self.user_id, self.gae_id, self.firebase_auth_id,
                self.parent_user_id, self.deleted))
    def validate(self):
        """Checks whether user_id, gae_id, firebase_auth_id, and parent_user_id
        are valid.

        Raises:
            ValidationError. The user_id is not specified.
            ValidationError. The user_id is not a string.
            ValidationError. The user_id has the wrong format.
            ValidationError. The gae_id is not a string.
            ValidationError. The firebase_auth_id is not a string.
            ValidationError. The parent_user_id has the wrong format.
            ValidationError. The parent_user_id is set for a full user.
            ValidationError. The parent_user_id is not set for a profile user.
        """
        if not self.user_id:
            raise utils.ValidationError('No user_id specified')
        if not isinstance(self.user_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'user_id must be a string, but got %r' % self.user_id)
        if not utils.is_user_id_valid(self.user_id):
            raise utils.ValidationError(
                'user_id=%r has the wrong format' % self.user_id)
        if (self.gae_id is not None and
                not isinstance(self.gae_id, python_utils.BASESTRING)):
            raise utils.ValidationError(
                'gae_id must be a string, but got %r' % self.gae_id)
        if (self.firebase_auth_id is not None and
                not isinstance(self.firebase_auth_id, python_utils.BASESTRING)):
            raise utils.ValidationError(
                'firebase_auth_id must be a string, but got %r' %
                self.firebase_auth_id)
        if (self.parent_user_id is not None and
                not utils.is_user_id_valid(self.parent_user_id)):
            raise utils.ValidationError(
                'parent_user_id=%r has the wrong format' % self.parent_user_id)
        # Enforce the class invariant: full accounts have no parent; profile
        # accounts must have one.
        if self.is_full_user() and self.parent_user_id is not None:
            raise utils.ValidationError(
                'parent_user_id must not be set for a full user, but got '
                'gae_id=%r, firebase_auth_id=%r, parent_user_id=%r' % (
                    self.gae_id, self.firebase_auth_id, self.parent_user_id))
        if not self.is_full_user() and self.parent_user_id is None:
            raise utils.ValidationError(
                'parent_user_id must be set for a profile user, but got '
                'gae_id=%r, firebase_auth_id=%r, parent_user_id=%r' % (
                    self.gae_id, self.firebase_auth_id, self.parent_user_id))
    @property
    def auth_id(self):
        """Returns the auth ID corresponding to the user account, if any.

        This method is a utility for simplifying code that doesn't care about
        which identity provider the auth ID came from.

        Returns:
            str. Returns firebase_auth_id if it is not None, otherwise gae_id.
        """
        return self.firebase_auth_id or self.gae_id
    def is_full_user(self):
        """Returns whether self refers to a full user account."""
        return self.auth_id is not None
    def to_dict(self):
        """Returns values corresponding to UserAuthDetailsModel's properties.

        This method is a utility for assigning values to UserAuthDetailsModel:
            user_auth_details.validate()
            user_auth_details_model.populate(**user_auth_details.to_dict())

        NOTE: The dict returned does not include user_id because that value is
        UserAuthDetailsModel's key. Keys are distinct from normal properties,
        and cannot be re-assigned using the `populate()` method; trying to
        assign to it will raise an exception.

        Returns:
            dict(str:*). A dict of values from self using UserAuthDetailsModel
            property names as keys.
        """
        return {
            'gae_id': self.gae_id,
            'firebase_auth_id': self.firebase_auth_id,
            'parent_user_id': self.parent_user_id,
            'deleted': self.deleted,
        }
| apache-2.0 |
gangadharkadam/v5_erp | erpnext/tests/sel_tests.py | 41 | 2553 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
"""
Run Selenium Tests
Requires a clean install. After reinstalling fresh db, call
frappe --execute erpnext.tests.sel_tests.start
"""
from __future__ import unicode_literals
import frappe
from frappe.utils import sel
import time
def start():
    """Run the selenium setup-wizard walkthrough, always closing the driver."""
    try:
        run()
    finally:
        sel.close()
def run():
    """Drive the ERPNext setup wizard slide-by-slide via selenium."""
    def next_slide(idx, selector="next-btn"):
        # Click the given button on slide `idx` and wait for the AJAX round trip.
        sel.find('[data-slide-id="{0}"] .{1}'.format(idx, selector))[0].click()
        sel.wait_for_ajax()
    sel.start(verbose=True, driver="Firefox")
    sel.input_wait = 0.2
    sel.login("#page-setup-wizard")
    # slide 1
    next_slide("0")
    sel.set_field("first_name", "Test")
    sel.set_field("last_name", "User")
    sel.set_field("email", "test@erpnext.com")
    sel.set_field("password", "test")
    next_slide("1")
    sel.set_select("country", "India")
    next_slide("2")
    sel.set_field("company_name", "Wind Power LLC")
    sel.set_field("fy_start_date", "01-04-2014")
    sel.set_field("company_tagline", "Wind Power For Everyone")
    next_slide("3")
    next_slide("4")
    # Taxes slide.
    sel.set_field("tax_1", "VAT")
    sel.set_field("tax_rate_1", "12.5")
    sel.set_field("tax_2", "Service Tax")
    sel.set_field("tax_rate_2", "10.36")
    next_slide("5")
    # Customers slide.
    sel.set_field("customer_1", "Asian Junction")
    sel.set_field("customer_contact_1", "January Vaclavik")
    sel.set_field("customer_2", "Life Plan Counselling")
    sel.set_field("customer_contact_2", "Jana Tobeolisa")
    sel.set_field("customer_3", "Two Pesos")
    sel.set_field("customer_contact_3", "Satomi Shigeki")
    sel.set_field("customer_4", "Intelacard")
    sel.set_field("customer_contact_4", "Hans Rasmussen")
    next_slide("6")
    # Items-to-sell slide.
    sel.set_field("item_1", "Wind Turbine A")
    sel.set_field("item_2", "Wind Turbine B")
    sel.set_field("item_3", "Wind Turbine C")
    next_slide("7")
    # Suppliers slide.
    sel.set_field("supplier_1", "Helios Air")
    sel.set_field("supplier_contact_1", "Quimey Osorio")
    sel.set_field("supplier_2", "Ks Merchandise")
    sel.set_field("supplier_contact_2", "Edgarda Salcedo")
    sel.set_field("supplier_3", "Eagle Hardware")
    sel.set_field("supplier_contact_3", "Hafsteinn Bjarnarsonar")
    next_slide("8")
    # Items-to-buy slide.
    sel.set_field("item_buy_1", "Bearing Pipe")
    sel.set_field("item_buy_2", "Bearing Assembly")
    sel.set_field("item_buy_3", "Base Plate")
    sel.set_field("item_buy_4", "Coil")
    next_slide("9", "complete-btn")
    sel.wait('[data-state="setup-complete"]')
    # NOTE(review): raw_input is Python-2-only (input on Python 3); this
    # pause keeps the browser open for manual inspection.
    w = raw_input("quit?")
# complete setup
# new customer
# new supplier
# new item
# sales cycle
# purchase cycle
| agpl-3.0 |
Ra1nWarden/googletest | test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError if expected != actual, printing both values."""
  if expected != actual:
    # Parenthesized single-argument form behaves identically as a Python 2
    # print statement and a Python 3 print() call, unlike the original
    # py2-only `print 'x'` syntax.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop() with a default is a no-op when the variable is absent,
    # matching the original's guarded `del`.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  args = [COMMAND]
  if flag is not None:
    args += [flag]
  # The helper binary prints the effective value of the requested flag.
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # With the env var set, the flag must report test_val...
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  # ...and with it unset, the flag must fall back to its default.
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Exercises every GTEST_* environment variable against its flag."""
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')
    # death_test_use_fork and stack_trace_depth are only meaningful on Linux.
    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
madprog/virtualenv | virtualenv_embedded/site.py | 784 | 27543 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
    """Join *paths* and return (absolute path, normalized-case path).

    Jython's synthetic '__classpath__' / '__pyclasspath__' entries are
    returned untouched.
    """
    joined = os.path.join(*paths)
    if _is_jython and (joined == '__classpath__' or
                       joined.startswith('__pyclasspath__')):
        return joined, joined
    joined = os.path.abspath(joined)
    return joined, os.path.normcase(joined)
def abs__file__():
    """Set all module' __file__ attribute to an absolute path"""
    for m in sys.modules.values():
        if ((_is_jython and not isinstance(m, ModuleType)) or
            hasattr(m, '__loader__')):
            # only modules need the abspath in Jython. and don't mess
            # with a PEP 302-supplied __file__
            continue
        f = getattr(m, '__file__', None)
        if f is None:
            # Built-in modules have no __file__ at all; skip them.
            continue
        m.__file__ = os.path.abspath(f)
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    deduped = []
    known_paths = set()
    for entry in sys.path:
        # Relative paths become absolute; duplicates -- including ones that
        # differ only in case on case-insensitive filesystems -- are dropped.
        entry, entry_case = makepath(entry)
        if entry_case not in known_paths:
            deduped.append(entry)
            known_paths.add(entry_case)
    sys.path[:] = deduped
    return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from distutils.util import get_platform
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    # sys.gettotalrefcount only exists on --with-pydebug builds, whose
    # build directories carry a '-pydebug' suffix.
    if hasattr(sys, 'gettotalrefcount'):
        s += '-pydebug'
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                _, entry_case = makepath(entry)
                existing.add(entry_case)
        except TypeError:
            # Non-string sys.path entries (e.g. import hooks) are skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Add a new path to known_paths by combining sitedir and 'name' or execute
    sitedir if it starts with 'import'.

    Returns the updated known_paths set, or None when it was built locally
    (mirroring the known_paths/reset protocol used by addsitedir).
    """
    if known_paths is None:
        # BUG FIX: the return value of _init_pathinfo() was discarded,
        # leaving known_paths as None and crashing on the membership test
        # below whenever this function was called standalone.
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            # NOTE(review): this matches any line starting with "import"
            # (CPython's site.py matches "import " / "import\t"); kept as-is
            # to preserve existing .pth handling.
            if line.startswith("import"):
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            if dircase not in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in known_paths:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    # Process .pth files in sorted (deterministic) order.
    names.sort()
    for name in names:
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        # known_paths was built locally; signal that to the caller.
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path"""
    # Debian convention: check <prefix>/local before <prefix> itself.
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))
    for prefix in prefixes:
        if prefix:
            # Per-platform layout of the site-packages directories.
            if sys.platform in ('os2emx', 'riscos') or _is_jython:
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, 'site-packages')]
            elif sys.platform == 'darwin' and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                    sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                                os.path.join(prefix, "Extras", "lib", "python")]
                else: # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib",
                                             "python" + sys.version[:3], "site-packages")]
            elif os.sep == '/':
                sitedirs = [os.path.join(prefix,
                                         "lib",
                                         "python" + sys.version[:3],
                                         "site-packages"),
                            os.path.join(prefix, "lib", "site-python"),
                            os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
                # On 64-bit systems lib64 takes precedence; otherwise it is a
                # fallback appended after the regular lib directories.
                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
                if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                    if _is_64bit:
                        sitedirs.insert(0, lib64_dir)
                    else:
                        sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                sitedirs.append(os.path.join(prefix, "local/lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                if sys.version[0] == '2':
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[:3],
                                                 "dist-packages"))
                else:
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[0],
                                                 "dist-packages"))
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
            # Only directories that actually exist are added (and their
            # .pth files processed).
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Decide whether the per-user site-packages directory may be used.

    Returns:
        None  -- disabled for security reasons (set-uid/set-gid process)
        False -- disabled explicitly (``-s`` flag / PYTHONNOUSERSITE)
        True  -- safe and enabled
    """
    flags = getattr(sys, 'flags', None)
    if flags is not None and getattr(flags, 'no_user_site', False):
        return False

    # A process whose effective uid/gid differs from its real uid/gid
    # must not trust $HOME-controlled paths.
    for real_name, effective_name in (("getuid", "geteuid"),
                                      ("getgid", "getegid")):
        real = getattr(os, real_name, None)
        effective = getattr(os, effective_name, None)
        if real is not None and effective is not None:
            if effective() != real():
                return None

    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.

    USER_BASE is the root directory for all Python versions
    USER_SITE is the user specific site-packages directory
    USER_SITE/.. can be used for data.

    Side effects: rebinds the module globals USER_BASE and USER_SITE, and
    registers the directories via addsitedir() (defined elsewhere in this
    file) when ENABLE_USER_SITE is truthy.  Returns known_paths.
    """
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    # PYTHONUSERBASE overrides the platform-default user base directory.
    env_base = os.environ.get("PYTHONUSERBASE", None)

    def joinuser(*args):
        # Join path components, then expand a leading '~'.
        return os.path.expanduser(os.path.join(*args))

    #if sys.platform in ('os2emx', 'riscos'):
    #    # Don't know what to put here
    #    USER_BASE = ''
    #    USER_SITE = ''
    if os.name == "nt":
        # Windows layout: %APPDATA%\Python\PythonXY\site-packages
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE,
                                 "Python" + sys.version[0] + sys.version[2],
                                 "site-packages")
    else:
        # POSIX layout: ~/.local/lib/pythonX.Y/site-packages
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib",
                                 "python" + sys.version[:3],
                                 "site-packages")

    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian-specific per-user dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir,
                                     "python" + sys.version[:3],
                                     "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """Amend the OS/2 EMX dynamic-library search path.

    The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (.DLL extension) for other extensions; add the lib-dynload
    directory to BEGINLIBPATH so they are found during module import.
    Raises KeyError if BEGINLIBPATH is not set in the environment.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    entries = os.environ['BEGINLIBPATH'].split(';')
    # A trailing ';' leaves an empty final entry: reuse that slot,
    # otherwise append a new entry.
    if entries[-1]:
        entries = entries + [dllpath]
    else:
        entries[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(entries)
def setquit():
    """Install the interactive 'quit' and 'exit' helpers as built-ins.

    Each is an object whose repr explains how to leave the interpreter
    and which raises SystemExit when called.
    """
    # Pick the platform-appropriate EOF hint by path separator:
    # ':' => classic Mac OS, '\\' => Windows, anything else => POSIX.
    eof_hints = {':': 'Cmd-Q', '\\': 'Ctrl-Z plus Return'}
    eof = eof_hints.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    builtins.quit = Quitter('quit')
    builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
    """Install 'copyright', 'credits' and 'license' as built-ins.

    Each is a _Printer that pages the relevant text on demand; the
    license text is looked up on disk (next to the stdlib) first.
    """
    builtins.copyright = _Printer("copyright", sys.copyright)
    if _is_jython:
        credits_text = ("Jython is maintained by the Jython developers "
                        "(www.jython.org).")
    elif _is_pypy:
        credits_text = ("PyPy is maintained by the PyPy developers: "
                        "http://pypy.org/")
    else:
        credits_text = """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information."""
    builtins.credits = _Printer("credits", credits_text)

    here = os.path.dirname(os.__file__)
    license_dirs = [os.path.join(here, os.pardir), here, os.curdir]
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"], license_dirs)
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    """Install the interactive 'help' built-in (see _Helper above)."""
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case."""
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # Bug fix: getdefaultlocale() may return (None, None); guard
        # before calling str methods on the encoding, which previously
        # raised AttributeError.
        if enc is not None and enc.startswith('cp'):   # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                # Encoding unknown to Python: alias it to mbcs via the
                # private encodings cache and the public alias table.
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation.  The
    default is 'ascii', but if you're willing to experiment, you can
    change this.

    The two ``if 0:`` blocks below are deliberate compile-time toggles
    left by the original authors; flip one to 1 to opt in.  Note that
    main() deletes sys.setdefaultencoding afterwards, so this only
    works during interpreter startup.
    """
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
    """Run custom site specific code, if available.

    A missing sitecustomize module is silently ignored; any other
    exception raised while importing it propagates to the caller.
    """
    try:
        import sitecustomize
    except ImportError:
        pass
def virtual_install_main_packages():
    """Extend sys.path with the real (pre-virtualenv) stdlib directories.

    Reads the original interpreter prefix from 'orig-prefix.txt' (written
    by virtualenv next to this file), exposes it as sys.real_prefix, and
    appends the platform-appropriate stdlib locations to sys.path.

    NOTE(review): ``pos`` is computed below but never used in this
    version -- it appears vestigial from a variant that inserted rather
    than extended.
    """
    f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
    sys.real_prefix = f.read().strip()
    f.close()
    pos = 2
    hardcoded_relative_dirs = []
    if sys.path[0] == '':
        pos += 1
    if _is_jython:
        paths = [os.path.join(sys.real_prefix, 'Lib')]
    elif _is_pypy:
        # PyPy's stdlib layout depends on both the CPython compatibility
        # version and the PyPy release.
        if sys.version_info > (3, 2):
            cpyver = '%d' % sys.version_info[0]
        elif sys.pypy_version_info >= (1, 5):
            cpyver = '%d.%d' % sys.version_info[:2]
        else:
            cpyver = '%d.%d.%d' % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
                 os.path.join(sys.real_prefix, 'lib-python', cpyver)]
        if sys.pypy_version_info < (1, 9):
            paths.insert(1, os.path.join(sys.real_prefix,
                                         'lib-python', 'modified-%s' % cpyver))
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, 'plat-%s' % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    elif sys.platform == 'win32':
        paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
    else:
        paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
        if os.path.exists(lib64_path):
            # 64-bit builds prefer lib64 first; 32-bit ones fall back to it.
            if _is_64bit:
                paths.insert(0, lib64_path)
            else:
                paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to
        # sys.prefix.  Debian change: we need to add the multiarch triplet
        # here, which is where the real stuff lives.  As per PEP 421, in
        # Python 3.3+, this lives in sys.implementation, while in Python 2.7
        # it lives in sys.
        try:
            arch = getattr(sys, 'implementation', sys)._multiarch
        except AttributeError:
            # This is a non-multiarch aware Python.  Fallback to the old way.
            arch = sys.platform
        plat_path = os.path.join(sys.real_prefix, 'lib',
                                 'python'+sys.version[:3],
                                 'plat-%s' % arch)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, 'lib-tk')
        if os.path.exists(tk_dir):
            paths.append(tk_dir)
    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == 'darwin':
        hardcoded_paths = [os.path.join(relative_dir, module)
                           for relative_dir in hardcoded_relative_dirs
                           for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)
    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """
    Force easy_installed eggs in the global environment to get placed
    in sys.path after all packages inside the virtualenv.  This
    maintains the "least surprise" result that packages in the
    virtualenv always mask global packages, never the other way
    around.
    """
    # Start from any previously recorded insertion point.
    marker = getattr(sys, '__egginsert', 0)
    prefix = sys.prefix
    for index, entry in enumerate(sys.path):
        # Track the last sys.path entry (beyond the current marker)
        # that lives inside this virtualenv's prefix.
        if index > marker and entry.startswith(prefix):
            marker = index
    sys.__egginsert = marker + 1
def virtual_addsitepackages(known_paths):
    # Expose the real (pre-virtualenv) site-packages as well, after
    # demoting global eggs so in-virtualenv packages keep priority.
    # NOTE(review): relies on sys.real_prefix having been set earlier by
    # virtual_install_main_packages() -- confirm call order in main().
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
    """Adjust the special classpath sys.path entries for Jython. These
    entries should follow the base virtualenv lib directories.
    """
    def is_classpath_entry(entry):
        return entry == '__classpath__' or entry.startswith('__pyclasspath__')

    ordinary = [p for p in sys.path if not is_classpath_entry(p)]
    special = [p for p in sys.path if is_classpath_entry(p)]
    # Rebuild sys.path with the classpath entries moved to the end.
    sys.path = ordinary + special
def execusercustomize():
    """Run custom user specific code, if available.

    A missing usercustomize module is silently ignored; any other
    exception raised while importing it propagates to the caller.
    """
    try:
        import usercustomize
    except ImportError:
        pass
def main():
    """Full site initialization sequence for a virtualenv interpreter.

    Order matters: the real stdlib directories must land on sys.path
    before site-packages processing, and the interactive helpers are
    installed last.  Runs once at import time (call below).
    """
    global ENABLE_USER_SITE
    virtual_install_main_packages()
    abs__file__()
    paths_in_sys = removeduppaths()
    if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
        addbuilddir()
    if _is_jython:
        fixclasspath()
    # A 'no-global-site-packages.txt' marker next to this file (written
    # by virtualenv) disables both global and user site-packages.
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization.  The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding

main()
def _script():
    """Command-line entry point: report the site configuration.

    With no arguments, print sys.path and the user-site settings, then
    exit(0).  With --user-base and/or --user-site, print the requested
    path(s) joined by os.pathsep and exit with a status describing
    whether the user site directory is enabled (see help text below).
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")

        def exists(path):
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"

        print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        # Bug fix: report the existence of USER_SITE itself; the original
        # passed USER_BASE to exists() twice.
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)

if __name__ == '__main__':
    _script()
| mit |
cosmoharrigan/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
    """
    Plots a curve with tangent lines.

    Parameters
    ----------
    x : list
        List of x coordinates.
        Assumed to be sorted into ascending order, so that the tangent
        lines occupy 80 percent of the horizontal space between each pair
        of points.
    y : list
        List of y coordinates
    s : list
        List of slopes

    Raises
    ------
    RuntimeError
        If matplotlib.pyplot could not be imported.
    """
    assert isinstance(x, list)
    assert isinstance(y, list)
    assert isinstance(s, list)
    n = len(x)
    assert len(y) == n
    assert len(s) == n

    if pyplot is None:
        raise RuntimeError("Could not import pyplot, can't run this code.")
    pyplot.plot(x, y, color='b')

    if n == 0:
        pyplot.show()
        return

    # pyplot.hold was removed in matplotlib >= 3.0 (holding is the default
    # behavior there), so only call it when it still exists.
    if hasattr(pyplot, 'hold'):
        pyplot.hold(True)

    # Add dummy entries so that the for loop can use the same code on every
    # entry
    if n == 1:
        # Bug fix: the original concatenated a list with the float x[0]
        # (TypeError); pad the single point with dummy neighbors instead.
        x = [x[0] - 1.] + x + [x[0] + 1.]
    else:
        x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
    y = [0.] + y + [0]
    s = [0.] + s + [0]

    for i in xrange(1, n + 1):
        # Each tangent segment covers 40% of the gap on both sides of
        # point i (80% of the space between neighboring points overall).
        ld = 0.4 * (x[i] - x[i - 1])
        lx = x[i] - ld
        ly = y[i] - ld * s[i]
        rd = 0.4 * (x[i + 1] - x[i])
        rx = x[i] + rd
        ry = y[i] + rd * s[i]
        pyplot.plot([lx, rx], [ly, ry], color='g')
    pyplot.show()
if __name__ == "__main__":
    # Demo by plotting a quadratic function.
    # The derivative of 0.5 * x**2 is x, so x doubles as the slope list.
    import numpy as np
    x = np.arange(-5., 5., .1)
    y = 0.5 * (x ** 2)
    x = list(x)
    y = list(y)
    tangent_plot(x, y, x)
| bsd-3-clause |
erikrose/pip | pip/baseparser.py | 424 | 10465 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
    """A prettier/less verbose help formatter for optparse.

    Tightens indentation, sizes output to the terminal width, and takes
    full control over usage/description/epilog rendering.
    """

    def __init__(self, *args, **kwargs):
        # help position must be aligned with __init__.parseopts.description
        kwargs['max_help_position'] = 30
        kwargs['indent_increment'] = 1
        # Leave a 2-column margin at the right edge of the terminal.
        kwargs['width'] = get_terminal_size()[0] - 2
        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)

    def format_option_strings(self, option):
        return self._format_option_strings(option, ' <%s>', ', ')

    def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
        """
        Return a comma-separated list of option strings and metavars.

        :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
        :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
        :param optsep:  separator
        """
        opts = []

        # Show at most one short and one long spelling for the option.
        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, optsep)

        if option.takes_value():
            metavar = option.metavar or option.dest.lower()
            opts.append(mvarfmt % metavar.lower())

        return ''.join(opts)

    def format_heading(self, heading):
        # The default 'Options' heading is suppressed entirely.
        if heading == 'Options':
            return ''
        return heading + ':\n'

    def format_usage(self, usage):
        """
        Ensure there is only one newline between usage and the first heading
        if there is no description.
        """
        msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), "  ")
        return msg

    def format_description(self, description):
        # leave full control over description to us
        if description:
            # The main pip parser labels its description 'Commands'.
            if hasattr(self.parser, 'main'):
                label = 'Commands'
            else:
                label = 'Description'
            # some doc strings have initial newlines, some don't
            description = description.lstrip('\n')
            # some doc strings have final newlines and spaces, some don't
            description = description.rstrip()
            # dedent, then reindent
            description = self.indent_lines(textwrap.dedent(description), "  ")
            description = '%s:\n%s\n' % (label, description)
            return description
        else:
            return ''

    def format_epilog(self, epilog):
        # leave full control over epilog to us
        if epilog:
            return epilog
        else:
            return ''

    def indent_lines(self, text, indent):
        # Prefix every line (including empty ones) with `indent`.
        new_lines = [indent + line for line in text.split('\n')]
        return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
    """Help formatter for ConfigOptionParser.

    Refreshes the parser's defaults (from config files and environment
    variables) right before they are expanded, so the help listing shows
    the effective values rather than the hard-coded ones.
    """

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            # Pull in config-file / environment overrides first.
            parser._update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
    """OptionParser with positional group insertion and a flattened view
    of all options (top-level plus grouped)."""

    def insert_option_group(self, idx, *args, **kwargs):
        """Insert an OptionGroup at a given position."""
        # add_option_group always appends; relocate the new group to idx.
        group = self.add_option_group(*args, **kwargs)
        self.option_groups.remove(group)
        self.option_groups.insert(idx, group)
        return group

    @property
    def option_list_all(self):
        """Get a list of all options, including those in option groups."""
        everything = list(self.option_list)
        for group in self.option_groups:
            everything.extend(group.option_list)
        return everything
class ConfigOptionParser(CustomOptionParser):
    """Custom option parser which updates its defaults by checking the
    configuration files and environmental variables.

    Precedence (lowest to highest): site-wide config, legacy user config,
    new user config, virtualenv config, then PIP_* environment variables.
    """

    # When True, config files and environment variables are ignored.
    isolated = False

    def __init__(self, *args, **kwargs):
        self.config = configparser.RawConfigParser()
        # 'name' is the command name; it selects the per-command config
        # section in addition to [global].
        self.name = kwargs.pop('name')
        self.isolated = kwargs.pop("isolated", False)
        self.files = self.get_config_files()
        if self.files:
            self.config.read(self.files)
        assert self.name
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        # the files returned by this method will be parsed in order with the
        # first files listed being overridden by later files in standard
        # ConfigParser fashion
        config_file = os.environ.get('PIP_CONFIG_FILE', False)
        # PIP_CONFIG_FILE=/dev/null disables all configuration files.
        if config_file == os.devnull:
            return []

        # at the base we have any site-wide configuration
        files = list(site_config_files)

        # per-user configuration next
        if not self.isolated:
            if config_file and os.path.exists(config_file):
                files.append(config_file)
            else:
                # This is the legacy config file, we consider it to be a lower
                # priority than the new file location.
                files.append(legacy_config_file)

                # This is the new config file, we consider it to be a higher
                # priority than the legacy file.
                files.append(
                    os.path.join(
                        appdirs.user_config_dir("pip"),
                        config_basename,
                    )
                )

        # finally virtualenv configuration first trumping others
        if running_under_virtualenv():
            venv_config_file = os.path.join(
                sys.prefix,
                config_basename,
            )
            if os.path.exists(venv_config_file):
                files.append(venv_config_file)

        return files

    def check_default(self, option, key, val):
        # Validate one config-supplied value; a bad value aborts with
        # exit status 3 rather than raising.
        try:
            return option.check_value(key, val)
        except optparse.OptionValueError as exc:
            print("An error occurred during configuration: %s" % exc)
            sys.exit(3)

    def _update_defaults(self, defaults):
        """Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists).

        Mutates and returns `defaults`; temporarily binds self.values so
        callback options can see accumulated state.
        """
        # Then go and look for the other sources of configuration:

        # 1. config files
        config = {}
        for section in ('global', self.name):
            config.update(
                self.normalize_keys(self.get_config_section(section))
            )

        # 2. environmental variables
        if not self.isolated:
            config.update(self.normalize_keys(self.get_environ_vars()))

        # Accumulate complex default state.
        self.values = optparse.Values(self.defaults)
        late_eval = set()
        # Then set the options with those values
        for key, val in config.items():
            # ignore empty values
            if not val:
                continue

            option = self.get_option(key)
            # Ignore options not present in this parser. E.g. non-globals put
            # in [global] by users that want them to apply to all applicable
            # commands.
            if option is None:
                continue

            if option.action in ('store_true', 'store_false', 'count'):
                val = strtobool(val)
            elif option.action == 'append':
                # List-valued options are whitespace-separated in config.
                val = val.split()
                val = [self.check_default(option, key, v) for v in val]
            elif option.action == 'callback':
                # Callbacks are replayed now; their final value is read
                # back from self.values after the loop (late_eval).
                late_eval.add(option.dest)
                opt_str = option.get_opt_string()
                val = option.convert_value(opt_str, val)
                # From take_action
                args = option.callback_args or ()
                kwargs = option.callback_kwargs or {}
                option.callback(option, opt_str, val, self, *args, **kwargs)
            else:
                val = self.check_default(option, key, val)

            defaults[option.dest] = val

        for key in late_eval:
            defaults[key] = getattr(self.values, key)
        self.values = None
        return defaults

    def normalize_keys(self, items):
        """Return a config dictionary with normalized keys regardless of
        whether the keys were specified in environment variables or in config
        files"""
        normalized = {}
        for key, val in items:
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            normalized[key] = val
        return normalized

    def get_config_section(self, name):
        """Get a section of a configuration"""
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self):
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            if _environ_prefix_re.search(key):
                yield (_environ_prefix_re.sub("", key).lower(), val)

    def get_default_values(self):
        """Overridding to make updating the defaults after instantiation of
        the option parser possible, _update_defaults() does the dirty work."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self._update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, string_types):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)

    def error(self, msg):
        # Standard optparse error contract: usage to stderr, exit(2).
        self.print_usage(sys.stderr)
        self.exit(2, "%s\n" % msg)
| mit |
trustly/trustly-client-python | trustly/data/jsonrpcnotificationrequest.py | 1 | 3241 | """
The MIT License (MIT)
Copyright (c) 2014 Trustly Group AB
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import absolute_import
import json
import trustly.data
class JSONRPCNotificationRequest(trustly.data.data.Data):
    """Incoming JSON-RPC 1.1 notification from Trustly.

    Parses the raw HTTP body and exposes accessors for the envelope
    fields: method, version, params, params.data, uuid and signature.
    Only JSON-RPC version '1.1' payloads are accepted.
    """

    # Raw request body as received; retained (e.g. for later inspection).
    notification_body = None

    def __init__(self, notification_body):
        super(JSONRPCNotificationRequest, self).__init__()
        self.notification_body = notification_body
        try:
            payload = json.loads(self.notification_body)
            if payload is not None:
                self.payload = payload
        except ValueError as e:
            # NOTE(review): trustly.exceptions is referenced here but only
            # trustly.data is imported above -- confirm the package import
            # graph makes the submodule attribute available.
            raise trustly.exceptions.TrustlyDataError(str(e))

        if self.get_version() != '1.1':
            raise trustly.exceptions.TrustlyJSONRPCVersionError('JSON RPC Version {0} is not supported'.format(self.get_version()))

    def get_params(self, name=None):
        """Return a copy of 'params', or params[name].

        Raises KeyError when `name` is given but 'params' is absent or
        lacks the key; returns None when `name` is None and 'params' is
        absent.
        """
        params = self.payload.get('params')
        if name is None:
            if params is not None:
                return params.copy()
        elif params is not None:
            return params[name]
        else:
            raise KeyError('{0} is not present in params'.format(name))
        return None

    def get_data(self, name=None):
        """Return a copy of params.data, or data[name] (same contract as
        get_params, one level deeper)."""
        params = self.payload.get('params')
        data = None
        if params is not None:
            data = params.get('data')

        if name is None:
            if data is not None:
                return data.copy()
        elif data is not None:
            return data[name]
        else:
            raise KeyError('{0} is not present in data'.format(name))
        return None

    def get_uuid(self):
        # Notification uuid from params, or None when absent.
        try:
            return self.get_params('uuid')
        except KeyError as e:
            pass
        return None

    def get_method(self):
        # Top-level JSON-RPC method name, or None when absent.
        try:
            return self.get('method')
        except KeyError as e:
            pass
        return None

    def get_signature(self):
        # Trustly signature from params, or None when absent.
        try:
            return self.get_params('signature')
        except KeyError as e:
            pass
        return None

    def get_version(self):
        # JSON-RPC version string from the envelope, or None when absent.
        try:
            return self.get('version')
        except KeyError as e:
            pass
        return None
# vim: set et cindent ts=4 ts=4 sw=4:
| mit |
dremio/arrow | integration/integration_test.py | 4 | 33972 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import argparse
import binascii
import glob
import itertools
import json
import os
import random
import six
import string
import subprocess
import tempfile
import uuid
import errno
import numpy as np
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
# Control for flakiness
np.random.seed(12345)
def load_version_from_pom():
    """Read the Arrow version string out of java/pom.xml.

    NOTE(review): assumes the first <version> element in the Maven POM
    namespace is the project version -- confirm against the pom layout.
    """
    import xml.etree.ElementTree as ET
    pom_path = os.path.join(ARROW_HOME, 'java', 'pom.xml')
    tree = ET.parse(pom_path)
    version_tag_name = '{http://maven.apache.org/POM/4.0.0}version'
    matches = list(tree.getroot().findall(version_tag_name))
    return matches[0].text
def guid():
    """Return a fresh random identifier as a 32-character hex string."""
    unique = uuid.uuid4()
    return unique.hex
# from pandas
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
def rands(nchars):
    """
    Generate one random byte string.

    See `rands_array` if you want to create an array of random strings.

    """
    picks = np.random.choice(RANDS_CHARS, nchars)
    return ''.join(picks)
def tobytes(o):
    """Encode text to UTF-8 bytes; pass any other value through as-is."""
    return o.encode('utf8') if isinstance(o, six.text_type) else o
def frombytes(o):
    """Decode UTF-8 bytes to text; pass any other value through as-is."""
    return o.decode('utf8') if isinstance(o, six.binary_type) else o
# from the merge_arrow_pr.py script
def run_cmd(cmd):
    """Run *cmd* (string or argv list) and return its decoded output.

    A string command is split on single spaces.  On failure the command
    and its combined stdout/stderr are echoed before the
    CalledProcessError is re-raised.
    """
    argv = cmd.split(' ') if isinstance(cmd, six.string_types) else cmd
    try:
        raw = subprocess.check_output(argv, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # this avoids hiding the stdout / stderr of failed processes
        print('Command failed: %s' % ' '.join(argv))
        print('With output:')
        print('--------------')
        print(frombytes(e.output))
        print('--------------')
        raise e

    return frombytes(raw)
# ----------------------------------------------------------------------
# Data generation
class DataType(object):
    """Base class for Arrow integration-test types.

    Subclasses provide _get_type() and _get_children() describing the
    type in the integration-test JSON schema format.
    """

    def __init__(self, name, nullable=True):
        self.name = name
        self.nullable = nullable

    def get_json(self):
        """Serialize the field description as an OrderedDict."""
        return OrderedDict([
            ('name', self.name),
            ('type', self._get_type()),
            ('nullable', self.nullable),
            ('children', self._get_children())
        ])

    def _make_is_valid(self, size):
        """Validity bitmap: random 0/1 when nullable, all ones otherwise."""
        if not self.nullable:
            return np.ones(size)
        return np.random.randint(0, 2, size=size)
class Column(object):
    """Base class for generated test-data columns."""

    def __init__(self, name, count):
        self.name = name
        self.count = count

    def __len__(self):
        return self.count

    def _get_children(self):
        # Flat columns have no child arrays.
        return []

    def _get_buffers(self):
        # Subclasses contribute (buffer-name, values) pairs here.
        return []

    def get_json(self):
        """Serialize as an OrderedDict: name, count, buffers, children."""
        entries = [('name', self.name), ('count', self.count)]
        entries.extend(self._get_buffers())
        kids = self._get_children()
        if kids:
            # 'children' is emitted only when non-empty.
            entries.append(('children', kids))
        return OrderedDict(entries)
class PrimitiveType(DataType):
    """A leaf (non-nested) Arrow type; primitive types have no children."""

    def _get_children(self):
        return []
class PrimitiveColumn(Column):
    """Column of primitive values: a validity bitmap plus a data buffer."""

    def __init__(self, name, count, is_valid, values):
        super(PrimitiveColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.values = values

    def _encode_value(self, x):
        # Hook for subclasses that need JSON-safe encodings of values.
        return x

    def _get_buffers(self):
        validity = [int(flag) for flag in self.is_valid]
        data = [self._encode_value(v) for v in self.values]
        return [('VALIDITY', validity), ('DATA', data)]
TEST_INT_MAX = 2 ** 31 - 1
TEST_INT_MIN = ~TEST_INT_MAX
class IntegerType(PrimitiveType):
    """Signed or unsigned integer type with clamped generation bounds."""

    def __init__(self, name, is_signed, bit_width, nullable=True,
                 min_value=TEST_INT_MIN,
                 max_value=TEST_INT_MAX):
        super(IntegerType, self).__init__(name, nullable=nullable)
        self.is_signed = is_signed
        self.bit_width = bit_width
        # Requested bounds; intersected with the physical range below.
        self.min_value = min_value
        self.max_value = max_value

    def _get_generated_data_bounds(self):
        # Intersect the type's physical value range with the requested
        # [min_value, max_value] bounds.
        signed_iinfo = np.iinfo('int' + str(self.bit_width))
        if self.is_signed:
            min_value, max_value = signed_iinfo.min, signed_iinfo.max
        else:
            # ARROW-1837 Remove this hack and restore full unsigned integer
            # range
            min_value, max_value = 0, signed_iinfo.max

        lower_bound = max(min_value, self.min_value)
        upper_bound = min(max_value, self.max_value)
        return lower_bound, upper_bound

    def _get_type(self):
        return OrderedDict([
            ('name', 'int'),
            ('isSigned', self.is_signed),
            ('bitWidth', self.bit_width)
        ])

    def generate_column(self, size, name=None):
        lower_bound, upper_bound = self._get_generated_data_bounds()
        return self.generate_range(size, lower_bound, upper_bound, name=name)

    def generate_range(self, size, lower, upper, name=None):
        # Note: np.random.randint's upper bound is exclusive.
        values = [int(x) for x in
                  np.random.randint(lower, upper, size=size)]

        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return PrimitiveColumn(name, size, is_valid, values)
class DateType(IntegerType):
    """Arrow date type: 32-bit day counts or 64-bit millisecond counts."""

    DAY = 0
    MILLISECOND = 1

    # 1/1/1 to 12/31/9999
    _ranges = {
        DAY: [-719162, 2932896],
        MILLISECOND: [-62135596800000, 253402214400000]
    }

    def __init__(self, name, unit, nullable=True):
        # DAY dates are 32-bit; MILLISECOND dates are 64-bit.
        bit_width = 32 if unit == self.DAY else 64

        min_value, max_value = self._ranges[unit]
        super(DateType, self).__init__(
            name, True, bit_width, nullable=nullable,
            min_value=min_value, max_value=max_value
        )
        self.unit = unit

    def _get_type(self):
        return OrderedDict([
            ('name', 'date'),
            ('unit', 'DAY' if self.unit == self.DAY else 'MILLISECOND')
        ])
TIMEUNIT_NAMES = {
's': 'SECOND',
'ms': 'MILLISECOND',
'us': 'MICROSECOND',
'ns': 'NANOSECOND'
}
class TimeType(IntegerType):
    """Arrow time-of-day type; bit width is determined by the unit."""

    BIT_WIDTHS = {
        's': 32,
        'ms': 32,
        'us': 64,
        'ns': 64
    }

    # Valid range: exactly one day expressed in each unit.
    _ranges = {
        's': [0, 86400],
        'ms': [0, 86400000],
        'us': [0, 86400000000],
        'ns': [0, 86400000000000]
    }

    def __init__(self, name, unit='s', nullable=True):
        min_val, max_val = self._ranges[unit]
        super(TimeType, self).__init__(name, True, self.BIT_WIDTHS[unit],
                                       nullable=nullable,
                                       min_value=min_val,
                                       max_value=max_val)
        self.unit = unit

    def _get_type(self):
        return OrderedDict([
            ('name', 'time'),
            ('unit', TIMEUNIT_NAMES[self.unit]),
            ('bitWidth', self.bit_width)
        ])
class TimestampType(IntegerType):
    """Arrow timestamp type (64-bit) with an optional timezone string."""

    # 1/1/1 to 12/31/9999
    _ranges = {
        's': [-62135596800, 253402214400],
        'ms': [-62135596800000, 253402214400000],
        'us': [-62135596800000000, 253402214400000000],

        # Physical range for int64, ~584 years and change
        'ns': [np.iinfo('int64').min, np.iinfo('int64').max]
    }

    def __init__(self, name, unit='s', tz=None, nullable=True):
        min_val, max_val = self._ranges[unit]
        super(TimestampType, self).__init__(name, True, 64, nullable=nullable,
                                            min_value=min_val,
                                            max_value=max_val)
        self.unit = unit
        self.tz = tz

    def _get_type(self):
        fields = [
            ('name', 'timestamp'),
            ('unit', TIMEUNIT_NAMES[self.unit])
        ]

        # The timezone entry is only emitted when one was supplied.
        if self.tz is not None:
            fields.append(('timezone', self.tz))

        return OrderedDict(fields)
class FloatingPointType(PrimitiveType):
    """IEEE-754 floating point type (half, single or double precision)."""

    # Arrow precision name keyed by storage width in bits.
    _PRECISIONS = {16: 'HALF', 32: 'SINGLE', 64: 'DOUBLE'}

    def __init__(self, name, bit_width, nullable=True):
        super(FloatingPointType, self).__init__(name, nullable=nullable)
        self.bit_width = bit_width
        self.precision = self._PRECISIONS[bit_width]

    @property
    def numpy_type(self):
        return 'float' + str(self.bit_width)

    def _get_type(self):
        return OrderedDict([
            ('name', 'floatingpoint'),
            ('precision', self.precision)
        ])

    def generate_column(self, size, name=None):
        """Generate `size` random normal floats (scaled by 1000, rounded to
        three decimal places so the JSON stays compact)."""
        data = np.round(np.random.randn(size) * 1000, 3)
        validity = self._make_is_valid(size)
        return PrimitiveColumn(self.name if name is None else name,
                               size, validity, data)
# Maximum unscaled value that fits in a signed integer of `nbytes` bytes,
# keyed by the largest decimal precision such an integer can always hold.
_BYTE_WIDTH_PRECISIONS = [1, 3, 5, 7, 10, 12, 15, 17,
                          19, 22, 24, 27, 29, 32, 34, 36]
DECIMAL_PRECISION_TO_VALUE = {
    prec: (1 << (8 * nbytes - 1)) - 1
    for nbytes, prec in enumerate(_BYTE_WIDTH_PRECISIONS, start=1)
}
def decimal_range_from_precision(precision):
    """Return the (min, max) unscaled-value pair usable at `precision`.

    Precisions without an exact table entry fall back to the next smaller
    tabulated precision; the minimum is the two's-complement lower bound
    ~max == -(max + 1).
    """
    assert 1 <= precision <= 38
    while precision not in DECIMAL_PRECISION_TO_VALUE:
        precision -= 1
    max_value = DECIMAL_PRECISION_TO_VALUE[precision]
    return ~max_value, max_value
class DecimalType(PrimitiveType):
    """Arrow decimal type with a fixed precision and scale.

    Generated values are random unscaled integers spanning the full range
    representable at `precision` digits.
    """
    def __init__(self, name, precision, scale, bit_width=128, nullable=True):
        # BUG FIX: this previously passed nullable=True unconditionally,
        # silently ignoring the caller's `nullable` argument.
        super(DecimalType, self).__init__(name, nullable=nullable)
        self.precision = precision
        self.scale = scale
        self.bit_width = bit_width
    @property
    def numpy_type(self):
        # Unscaled values can exceed int64, so keep them as Python objects.
        return object
    def _get_type(self):
        return OrderedDict([
            ('name', 'decimal'),
            ('precision', self.precision),
            ('scale', self.scale),
        ])
    def generate_column(self, size, name=None):
        """Generate `size` random unscaled decimal integers."""
        min_value, max_value = decimal_range_from_precision(self.precision)
        values = [random.randint(min_value, max_value) for _ in range(size)]
        is_valid = self._make_is_valid(size)
        if name is None:
            name = self.name
        return DecimalColumn(name, size, is_valid, values, self.bit_width)
class DecimalColumn(PrimitiveColumn):
    """PrimitiveColumn of unscaled decimal integers.

    Carries the storage bit width so the writer knows the physical layout
    of the decimal values.
    """
    def __init__(self, name, count, is_valid, values, bit_width=128):
        super(DecimalColumn, self).__init__(name, count, is_valid, values)
        self.bit_width = bit_width
    def _encode_value(self, x):
        # Serialize as a decimal string: values may not fit in a JSON number.
        return str(x)
class BooleanType(PrimitiveType):
    """Boolean type (bit-packed, 1 bit per value in the Arrow format)."""

    bit_width = 1

    @property
    def numpy_type(self):
        return 'bool'

    def _get_type(self):
        return OrderedDict([('name', 'bool')])

    def generate_column(self, size, name=None):
        """Generate `size` uniformly random booleans."""
        data = [bool(bit) for bit in np.random.randint(0, 2, size=size)]
        validity = self._make_is_valid(size)
        return PrimitiveColumn(self.name if name is None else name,
                               size, validity, data)
class BinaryType(PrimitiveType):
    """Variable-length binary type; generates short random byte strings."""
    @property
    def numpy_type(self):
        return object
    @property
    def column_class(self):
        # Subclasses (e.g. StringType) override this to change the column
        # representation while reusing the same generation logic.
        return BinaryColumn
    def _get_type(self):
        return OrderedDict([('name', 'binary')])
    def generate_column(self, size, name=None):
        """Generate `size` random binary values; null slots get b""."""
        K = 7  # fixed byte length of every generated value
        is_valid = self._make_is_valid(size)
        values = []
        for i in range(size):
            if is_valid[i]:
                # tobytes() replaces the long-deprecated (and now removed)
                # ndarray.tostring() alias; output is byte-identical.
                # NOTE(review): randint's upper bound is exclusive, so byte
                # value 255 is never generated — presumably unintended, but
                # preserved here; confirm before changing.
                draw = (np.random.randint(0, 255, size=K)
                        .astype(np.uint8)
                        .tobytes())
                values.append(draw)
            else:
                values.append(b"")
        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)
class FixedSizeBinaryType(PrimitiveType):
    """Fixed-width binary type: every value is exactly `byte_width` bytes."""
    def __init__(self, name, byte_width, nullable=True):
        super(FixedSizeBinaryType, self).__init__(name, nullable=nullable)
        self.byte_width = byte_width
    @property
    def numpy_type(self):
        return object
    @property
    def column_class(self):
        return FixedSizeBinaryColumn
    def _get_type(self):
        return OrderedDict([('name', 'fixedsizebinary'),
                            ('byteWidth', self.byte_width)])
    def _get_type_layout(self):
        # Fixed-width layout: validity bitmap plus a data vector, no offsets.
        return OrderedDict([
            ('vectors',
             [OrderedDict([('type', 'VALIDITY'),
                           ('typeBitWidth', 1)]),
              OrderedDict([('type', 'DATA'),
                           ('typeBitWidth', self.byte_width)])])])
    def generate_column(self, size, name=None):
        """Generate `size` random fixed-width values.

        Data is generated even for null slots, since every slot occupies
        `byte_width` bytes regardless of validity.
        """
        is_valid = self._make_is_valid(size)
        values = []
        for i in range(size):
            # tobytes() replaces the deprecated/removed ndarray.tostring().
            draw = (np.random.randint(0, 255, size=self.byte_width)
                    .astype(np.uint8)
                    .tobytes())
            values.append(draw)
        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)
class StringType(BinaryType):
    """UTF-8 string type: same layout as binary, 'utf8' in the JSON."""
    @property
    def column_class(self):
        return StringColumn
    def _get_type(self):
        return OrderedDict([('name', 'utf8')])
    def generate_column(self, size, name=None):
        """Generate `size` random 7-character ASCII strings (as bytes);
        null slots get b""."""
        K = 7
        is_valid = self._make_is_valid(size)
        values = []
        for i in range(size):
            if is_valid[i]:
                values.append(tobytes(rands(K)))
            else:
                values.append(b"")
        if name is None:
            name = self.name
        return self.column_class(name, size, is_valid, values)
class JsonSchema(object):
    """The top-level 'schema' entry of an integration-format JSON file."""

    def __init__(self, fields):
        self.fields = fields

    def get_json(self):
        field_entries = [field_obj.get_json() for field_obj in self.fields]
        return OrderedDict([('fields', field_entries)])
class BinaryColumn(PrimitiveColumn):
    """Column of variable-length binary values, JSON-encoded as hex."""
    def _encode_value(self, x):
        # Uppercase hex string, two digits per byte.
        return frombytes(binascii.hexlify(x).upper())
    def _get_buffers(self):
        offset = 0
        offsets = [0]
        data = []
        for i, v in enumerate(self.values):
            if self.is_valid[i]:
                offset += len(v)
            else:
                # Null slots contribute no bytes: the offset repeats and an
                # empty value is encoded in their place.
                v = b""
            offsets.append(offset)
            data.append(self._encode_value(v))
        return [
            ('VALIDITY', [int(x) for x in self.is_valid]),
            ('OFFSET', offsets),
            ('DATA', data)
        ]
class FixedSizeBinaryColumn(PrimitiveColumn):
    """Column of fixed-width binary values; no offsets buffer is needed."""

    def _encode_value(self, x):
        # Uppercase hex, two digits per byte ('X' == 'x' + .upper()).
        return ''.join('{:02X}'.format(c) for c in x)

    def _get_buffers(self):
        encoded = [self._encode_value(v) for v in self.values]
        return [
            ('VALIDITY', [int(flag) for flag in self.is_valid]),
            ('DATA', encoded)
        ]
class StringColumn(BinaryColumn):
    """Column of UTF-8 strings; JSON-encodes values as text, not hex."""
    def _encode_value(self, x):
        return frombytes(x)
class ListType(DataType):
    """Variable-length list type with a single child value type."""
    def __init__(self, name, value_type, nullable=True):
        super(ListType, self).__init__(name, nullable=nullable)
        self.value_type = value_type
    def _get_type(self):
        return OrderedDict([
            ('name', 'list')
        ])
    def _get_children(self):
        return [self.value_type.get_json()]
    def generate_column(self, size, name=None):
        """Generate a list column with per-slot lengths in [0, 4]; null
        slots are zero-length (their offset does not advance)."""
        MAX_LIST_SIZE = 4
        is_valid = self._make_is_valid(size)
        list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
        offsets = [0]
        offset = 0
        for i in range(size):
            if is_valid[i]:
                offset += int(list_sizes[i])
            offsets.append(offset)
        # The offset now is the total number of elements in the child array
        values = self.value_type.generate_column(offset)
        if name is None:
            name = self.name
        return ListColumn(name, size, is_valid, offsets, values)
class ListColumn(Column):
    """Column of variable-length lists: validity + offsets + one child."""

    def __init__(self, name, count, is_valid, offsets, values):
        super(ListColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.offsets = offsets
        self.values = values

    def _get_buffers(self):
        validity = [int(flag) for flag in self.is_valid]
        return [('VALIDITY', validity),
                ('OFFSET', list(self.offsets))]

    def _get_children(self):
        return [self.values.get_json()]
class StructType(DataType):
    """Struct type: a fixed set of named child fields."""

    def __init__(self, name, field_types, nullable=True):
        super(StructType, self).__init__(name, nullable=nullable)
        self.field_types = field_types

    def _get_type(self):
        return OrderedDict([('name', 'struct')])

    def _get_children(self):
        return [child.get_json() for child in self.field_types]

    def generate_column(self, size, name=None):
        """Generate a struct column by generating each child independently."""
        validity = self._make_is_valid(size)
        children = [child.generate_column(size)
                    for child in self.field_types]
        return StructColumn(self.name if name is None else name,
                            size, validity, children)
class Dictionary(object):
    """A shared dictionary value set with a numeric id.

    `field` describes the value type; `values` is the column holding the
    dictionary's values; `ordered` marks an ordered dictionary.
    """

    def __init__(self, id_, field, values, ordered=False):
        self.id_ = id_
        self.field = field
        self.values = values
        self.ordered = ordered

    def __len__(self):
        return len(self.values)

    def get_json(self):
        # The dictionary body is serialized as a one-column record batch.
        batch = JsonRecordBatch(len(self.values), [self.values])
        return OrderedDict([('id', self.id_),
                            ('data', batch.get_json())])
class DictionaryType(DataType):
    """Dictionary-encoded field: integer indices into a shared Dictionary."""
    def __init__(self, name, index_type, dictionary, nullable=True):
        super(DictionaryType, self).__init__(name, nullable=nullable)
        assert isinstance(index_type, IntegerType)
        assert isinstance(dictionary, Dictionary)
        self.index_type = index_type
        self.dictionary = dictionary
    def get_json(self):
        """Serialize using the dictionary *value* type for 'type'/'children',
        plus a 'dictionary' attribute linking to the shared dictionary."""
        dict_field = self.dictionary.field
        return OrderedDict([
            ('name', self.name),
            ('type', dict_field._get_type()),
            ('nullable', self.nullable),
            ('children', dict_field._get_children()),
            ('dictionary', OrderedDict([
                ('id', self.dictionary.id_),
                ('indexType', self.index_type._get_type()),
                ('isOrdered', self.dictionary.ordered)
            ]))
        ])
    def generate_column(self, size, name=None):
        """Generate random indices in [0, len(dictionary))."""
        if name is None:
            name = self.name
        return self.index_type.generate_range(size, 0, len(self.dictionary),
                                              name=name)
class StructColumn(Column):
    """Column of struct values: a validity buffer plus one child per field."""

    def __init__(self, name, count, is_valid, field_values):
        super(StructColumn, self).__init__(name, count)
        self.is_valid = is_valid
        self.field_values = field_values

    def _get_buffers(self):
        return [('VALIDITY', [int(flag) for flag in self.is_valid])]

    def _get_children(self):
        return [child.get_json() for child in self.field_values]
class JsonRecordBatch(object):
    """One 'batches' entry: a row count plus the serialized columns."""

    def __init__(self, count, columns):
        self.count = count
        self.columns = columns

    def get_json(self):
        serialized = [column.get_json() for column in self.columns]
        return OrderedDict([('count', self.count),
                            ('columns', serialized)])
class JsonFile(object):
    """A complete integration-format JSON document: schema, optional
    dictionaries, and record batches."""

    def __init__(self, name, schema, batches, dictionaries=None):
        self.name = name
        self.schema = schema
        self.dictionaries = dictionaries or []
        self.batches = batches

    def get_json(self):
        document = [('schema', self.schema.get_json())]
        if self.dictionaries:
            document.append(
                ('dictionaries',
                 [dictionary.get_json() for dictionary in self.dictionaries]))
        document.append(
            ('batches', [batch.get_json() for batch in self.batches]))
        return OrderedDict(document)

    def write(self, path):
        """Serialize to `path` as UTF-8 encoded, indented JSON."""
        with open(path, 'wb') as out:
            out.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
def get_field(name, type_, nullable=True):
    """Build a DataType instance for the type described by string `type_`.

    Accepts 'binary', 'utf8', 'fixedsizebinary_<byteWidth>', or any numpy
    dtype string (integer, floating point, or boolean).

    Raises TypeError for unsupported dtypes.
    """
    if type_ == 'binary':
        return BinaryType(name, nullable=nullable)
    if type_ == 'utf8':
        return StringType(name, nullable=nullable)
    if type_.startswith('fixedsizebinary_'):
        width = int(type_.split('_')[1])
        return FixedSizeBinaryType(name, byte_width=width, nullable=nullable)
    dtype = np.dtype(type_)
    bits = dtype.itemsize * 8
    if dtype.kind in ('i', 'u'):
        return IntegerType(name, dtype.kind == 'i', bits, nullable=nullable)
    if dtype.kind == 'f':
        return FloatingPointType(name, bits, nullable=nullable)
    if dtype.kind == 'b':
        return BooleanType(name, nullable=nullable)
    raise TypeError(dtype)
def _generate_file(name, fields, batch_sizes, dictionaries=None):
    """Assemble a JsonFile with one record batch per entry of batch_sizes,
    generating a column for every field in each batch."""
    batches = [
        JsonRecordBatch(size, [field.generate_column(size)
                               for field in fields])
        for size in batch_sizes
    ]
    return JsonFile(name, JsonSchema(fields), batches, dictionaries)
def generate_primitive_case(batch_sizes, name='primitive'):
    """One nullable and one non-nullable field for every primitive type."""
    types = ['bool', 'int8', 'int16', 'int32', 'int64',
             'uint8', 'uint16', 'uint32', 'uint64',
             'float32', 'float64', 'binary', 'utf8',
             'fixedsizebinary_19', 'fixedsizebinary_120']
    fields = []
    for type_ in types:
        for suffix, nullable in (('_nullable', True),
                                 ('_nonnullable', False)):
            fields.append(get_field(type_ + suffix, type_, nullable))
    return _generate_file(name, fields, batch_sizes)
def generate_decimal_case():
    """One decimal field per precision from 3 through 38 (scale fixed at 2)."""
    fields = []
    for index, precision in enumerate(range(3, 39)):
        fields.append(DecimalType(name='f{}'.format(index),
                                  precision=precision, scale=2))
    # Alternate batch sizes of 7 and 10, one batch per field.
    batch_sizes = [(7, 10)[i % 2] for i in range(len(fields))]
    return _generate_file('decimal', fields, batch_sizes)
def generate_datetime_case():
    """Cover both date units, all four time units, and timestamps with and
    without timezones (including named zones)."""
    fields = [
        DateType('f0', DateType.DAY),
        DateType('f1', DateType.MILLISECOND),
        TimeType('f2', 's'),
        TimeType('f3', 'ms'),
        TimeType('f4', 'us'),
        TimeType('f5', 'ns'),
        TimestampType('f6', 's'),
        TimestampType('f7', 'ms'),
        TimestampType('f8', 'us'),
        TimestampType('f9', 'ns'),
        TimestampType('f10', 'ms', tz=None),
        TimestampType('f11', 's', tz='UTC'),
        TimestampType('f12', 'ms', tz='US/Eastern'),
        TimestampType('f13', 'us', tz='Europe/Paris'),
        TimestampType('f14', 'ns', tz='US/Pacific')
    ]
    batch_sizes = [7, 10]
    return _generate_file("datetime", fields, batch_sizes)
def generate_nested_case():
    """Nested types: a nullable list of int32 and a nullable struct."""
    fields = [
        ListType('list_nullable', get_field('item', 'int32')),
        StructType('struct_nullable', [get_field('f1', 'int32'),
                                       get_field('f2', 'utf8')]),
        # TODO(wesm): this causes segfault
        # ListType('list_nonnullable', get_field('item', 'int32'), False),
    ]
    batch_sizes = [7, 10]
    return _generate_file("nested", fields, batch_sizes)
def generate_dictionary_case():
    """Two dictionaries (string and int64 values) referenced by three
    fields, exercising dictionary sharing and multiple index widths."""
    dict_type1 = StringType('dictionary1')
    dict_type2 = get_field('dictionary2', 'int64')
    dict1 = Dictionary(0, dict_type1,
                       dict_type1.generate_column(10, name='DICT0'))
    dict2 = Dictionary(1, dict_type2,
                       dict_type2.generate_column(50, name='DICT1'))
    fields = [
        DictionaryType('dict1_0', get_field('', 'int8'), dict1),
        DictionaryType('dict1_1', get_field('', 'int32'), dict1),
        DictionaryType('dict2_0', get_field('', 'int16'), dict2)
    ]
    batch_sizes = [7, 10]
    return _generate_file("dictionary", fields, batch_sizes,
                          dictionaries=[dict1, dict2])
def get_generated_json_files():
    """Generate every JSON test case into a fresh temp directory.

    Returns the list of written file paths. (The dead inner helper
    `_temp_path`, which returned None and was never called, was removed.)
    """
    temp_dir = tempfile.mkdtemp()
    file_objs = [
        generate_primitive_case([17, 20], name='primitive'),
        generate_primitive_case([0, 0, 0], name='primitive_zerolength'),
        generate_decimal_case(),
        generate_datetime_case(),
        generate_nested_case(),
        generate_dictionary_case()
    ]
    generated_paths = []
    for file_obj in file_objs:
        out_path = os.path.join(temp_dir, 'generated_' +
                                file_obj.name + '.json')
        file_obj.write(out_path)
        generated_paths.append(out_path)
    return generated_paths
# ----------------------------------------------------------------------
# Testing harness
class IntegrationRunner(object):
    """Drive cross-implementation testing: every PRODUCER tester is paired
    with every CONSUMER tester over every JSON file."""
    def __init__(self, json_files, testers, debug=False):
        self.json_files = json_files
        self.testers = testers
        self.temp_dir = tempfile.mkdtemp()
        self.debug = debug
    def run(self):
        # Cartesian product of producers x consumers (one tester can be both).
        for producer, consumer in itertools.product(filter(lambda t: t.PRODUCER, self.testers),
                                                    filter(lambda t: t.CONSUMER, self.testers)):
            self._compare_implementations(producer, consumer)
    def _compare_implementations(self, producer, consumer):
        """Round-trip each file: producer JSON->file->stream, consumer
        stream->file, with the consumer validating at each stage."""
        print('##########################################################')
        print(
            '{0} producing, {1} consuming'.format(producer.name, consumer.name)
        )
        print('##########################################################')
        for json_path in self.json_files:
            print('==========================================================')
            print('Testing file {0}'.format(json_path))
            print('==========================================================')
            name = os.path.splitext(os.path.basename(json_path))[0]
            # Make the random access file
            print('-- Creating binary inputs')
            producer_file_path = os.path.join(self.temp_dir, guid() + '_' +
                                              name + '.json_to_arrow')
            producer.json_to_file(json_path, producer_file_path)
            # Validate the file
            print('-- Validating file')
            consumer.validate(json_path, producer_file_path)
            print('-- Validating stream')
            producer_stream_path = os.path.join(self.temp_dir, guid() + '_' +
                                                name + '.arrow_to_stream')
            consumer_file_path = os.path.join(self.temp_dir, guid() + '_' +
                                              name + '.stream_to_arrow')
            producer.file_to_stream(producer_file_path,
                                    producer_stream_path)
            consumer.stream_to_file(producer_stream_path,
                                    consumer_file_path)
            consumer.validate(json_path, consumer_file_path)
class Tester(object):
    """Abstract interface to one Arrow implementation under test.

    Subclasses set PRODUCER/CONSUMER and implement the conversion and
    validation hooks below; every hook here raises NotImplementedError.
    """
    PRODUCER = False
    CONSUMER = False
    def __init__(self, debug=False):
        # When True, subclasses print the commands they run.
        self.debug = debug
    def json_to_file(self, json_path, arrow_path):
        raise NotImplementedError
    def stream_to_file(self, stream_path, file_path):
        raise NotImplementedError
    def file_to_stream(self, file_path, stream_path):
        raise NotImplementedError
    def validate(self, json_path, arrow_path):
        raise NotImplementedError
class JavaTester(Tester):
    """Tester driving the Java implementation via the arrow-tools fat jar."""
    PRODUCER = True
    CONSUMER = True
    _arrow_version = load_version_from_pom()
    # Jar location; overridable via the ARROW_JAVA_INTEGRATION_JAR env var.
    ARROW_TOOLS_JAR = os.environ.get(
        'ARROW_JAVA_INTEGRATION_JAR',
        os.path.join(ARROW_HOME,
                     'java/tools/target/arrow-tools-{}-'
                     'jar-with-dependencies.jar'.format(_arrow_version)))
    name = 'Java'
    def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
        # Invoke the Integration tool with the given paths and mode.
        cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
               'org.apache.arrow.tools.Integration']
        if arrow_path is not None:
            cmd.extend(['-a', arrow_path])
        if json_path is not None:
            cmd.extend(['-j', json_path])
        cmd.extend(['-c', command])
        if self.debug:
            print(' '.join(cmd))
        run_cmd(cmd)
    def validate(self, json_path, arrow_path):
        return self._run(arrow_path, json_path, 'VALIDATE')
    def json_to_file(self, json_path, arrow_path):
        return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
    def stream_to_file(self, stream_path, file_path):
        cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
               'org.apache.arrow.tools.StreamToFile',
               stream_path, file_path]
        if self.debug:
            print(' '.join(cmd))
        run_cmd(cmd)
    def file_to_stream(self, file_path, stream_path):
        cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
               'org.apache.arrow.tools.FileToStream',
               file_path, stream_path]
        if self.debug:
            print(' '.join(cmd))
        run_cmd(cmd)
class CPPTester(Tester):
    """Tester driving the C++ implementation's integration executables."""
    PRODUCER = True
    CONSUMER = True
    # Executable directory; overridable via the ARROW_CPP_EXE_PATH env var.
    EXE_PATH = os.environ.get(
        'ARROW_CPP_EXE_PATH',
        os.path.join(ARROW_HOME, 'cpp/build/debug'))
    CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'json-integration-test')
    STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file')
    FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream')
    name = 'C++'
    def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
        cmd = [self.CPP_INTEGRATION_EXE, '--integration']
        if arrow_path is not None:
            cmd.append('--arrow=' + arrow_path)
        if json_path is not None:
            cmd.append('--json=' + json_path)
        cmd.append('--mode=' + command)
        if self.debug:
            print(' '.join(cmd))
        run_cmd(cmd)
    def validate(self, json_path, arrow_path):
        return self._run(arrow_path, json_path, 'VALIDATE')
    def json_to_file(self, json_path, arrow_path):
        return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
    def stream_to_file(self, stream_path, file_path):
        # The C++ tools read stdin/write stdout, so these two helpers shell
        # out with redirection via os.system (paths are locally generated,
        # not untrusted input).
        cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path]
        cmd = ' '.join(cmd)
        if self.debug:
            print(cmd)
        os.system(cmd)
    def file_to_stream(self, file_path, stream_path):
        cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path]
        cmd = ' '.join(cmd)
        if self.debug:
            print(cmd)
        os.system(cmd)
class JSTester(Tester):
    """Tester for the JavaScript implementation (consumer only)."""
    PRODUCER = False
    CONSUMER = True
    INTEGRATION_EXE = os.path.join(ARROW_HOME, 'js/bin/integration.js')
    name = 'JS'
    def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
        cmd = [self.INTEGRATION_EXE]
        if arrow_path is not None:
            cmd.extend(['-a', arrow_path])
        if json_path is not None:
            cmd.extend(['-j', json_path])
        cmd.extend(['--mode', command])
        if self.debug:
            print(' '.join(cmd))
        run_cmd(cmd)
    def validate(self, json_path, arrow_path):
        return self._run(arrow_path, json_path, 'VALIDATE')
    def stream_to_file(self, stream_path, file_path):
        # Just copy stream to file, we can read the stream directly
        cmd = ['cp', stream_path, file_path]
        cmd = ' '.join(cmd)
        if self.debug:
            print(cmd)
        os.system(cmd)
def get_static_json_files():
    """Return the hand-written JSON cases checked into integration/data."""
    pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json')
    return glob.glob(pattern)
def run_all_tests(debug=False):
    """Run every producer/consumer pairing over static + generated files."""
    testers = [CPPTester(debug=debug),
               JavaTester(debug=debug),
               JSTester(debug=debug)]
    static_json_files = get_static_json_files()
    generated_json_files = get_generated_json_files()
    all_files = static_json_files + generated_json_files
    IntegrationRunner(all_files, testers, debug=debug).run()
    print('-- All tests passed!')
def write_js_test_json(directory):
    """Write the JS-consumable generated test cases into `directory`."""
    def _write(case, filename):
        # Helper: serialize one generated case next to its peers.
        case.write(os.path.join(directory, filename))
    _write(generate_nested_case(), 'nested.json')
    _write(generate_decimal_case(), 'decimal.json')
    _write(generate_datetime_case(), 'datetime.json')
    _write(generate_dictionary_case(), 'dictionary.json')
    _write(generate_primitive_case([7, 10]), 'primitive.json')
    _write(generate_primitive_case([0, 0, 0]), 'primitive-empty.json')
if __name__ == '__main__':
    # CLI: either emit the generated JSON cases into a directory, or run
    # the full cross-implementation integration suite.
    parser = argparse.ArgumentParser(description='Arrow integration test CLI')
    parser.add_argument('--write_generated_json', dest='generated_json_path',
                        action='store', default=False,
                        help='Generate test JSON')
    parser.add_argument('--debug', dest='debug', action='store_true',
                        default=False,
                        help='Run executables in debug mode as relevant')
    args = parser.parse_args()
    if args.generated_json_path:
        try:
            os.makedirs(args.generated_json_path)
        except OSError as e:
            # An already-existing directory is fine; re-raise anything else.
            if e.errno != errno.EEXIST:
                raise
        write_js_test_json(args.generated_json_path)
    else:
        run_all_tests(debug=args.debug)
| apache-2.0 |
ActiveState/code | recipes/Python/543261_grade_keeper/recipe-543261.py | 1 | 2222 | #! /usr/bin/python
# keep record of grades. Made by Caleb Herbert. 0.1-PUBLIC
# NOTE! All letter answers are to be written in quotes (including dates)!
print """############################################
# Welcome to Gradebook! v 0.1 #
# YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! #
############################################"""
subject = raw_input("What is your assignment's subject? ")
# ^^This asks your class subject; assigns it to 'subject'; and is used later.
date = input('What is the date for your assignment? ')
# ^^This is pretty much the same: but asks the date.
amount = input('What is the number of questions? (NOTE: make all #s from now decimals. e.g.: "5.0" ')
# ^^^This is also the same, but make the number a DECIMAL!
correct = input('How many questions did you get correct? ')
# ^^^The same... make all DECIMALS!
calc = divmod(correct, amount)
# This is a nice homework trick. Divides correct by amount, assigns to 'calc'
calcx = (correct / amount)
# divides correct by amount; assigns to 'calcx'
text = "***%s*** \n %s | %d out of %d | %s or %s \n" % (date, subject, correct, amount, calc, calcx)
# creates what will be in your file. assigns to 'text'
print text
# prints what it will put in your file (or append).
fle = raw_input('What should I name the file to put the above data into? ')
# prompts for a filename
A = input('Do you want this to be appended to an existing file? ')
# decides to either append,or to create new file. assigns answer to 'A'
print 'Thanks! appending to file... '
if A is 'yes': #if you answered yes:
fyl = open(fle, 'a')
# the phrase 'fyl' is used to combine open('fle, 'a') with future commands
fyl.write(text)
# the command assigned to 'fyl' writes your data to the filename you said.
fyl.close()
# closes the file; job is done.
elif A is 'no': # if you said no, this will happen:
fyl = open(fle, 'w')
# same as before, but saves the file (see the 'w' instead of 'a'?)
fyl.write(text)
# same
fyl.close()
# same
else: # and if nothing was valid...
print 'Error! Invalid transaction! '
# ...error message!
print 'Done!'
# says it is done
raw_input("Press <RETURN> to quit.")
# makes you type <enter> to quit.
| mit |
h3biomed/ansible | test/units/modules/network/iosxr/test_iosxr_config.py | 59 | 10185 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch, MagicMock
from ansible.modules.network.iosxr import iosxr_config
from ansible.plugins.cliconf.iosxr import Cliconf
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule, load_fixture
class TestIosxrConfigModule(TestIosxrModule):
    """Unit tests for the iosxr_config module.

    Device access is mocked out: get_config / load_config / get_connection
    are patched in setUp, and each test asserts the exact command list the
    module would push for a given combination of parameters.

    Cleanup: the original assigned the unused locals `result` (in the
    expected-failure tests) and `candidate` (pylint W0612); those
    assignments are dropped.
    """
    module = iosxr_config

    def setUp(self):
        super(TestIosxrConfigModule, self).setUp()

        self.patcher_get_config = patch('ansible.modules.network.iosxr.iosxr_config.get_config')
        self.mock_get_config = self.patcher_get_config.start()

        self.patcher_exec_command = patch('ansible.modules.network.iosxr.iosxr_config.load_config')
        self.mock_exec_command = self.patcher_exec_command.start()

        self.mock_get_connection = patch('ansible.modules.network.iosxr.iosxr_config.get_connection')
        self.get_connection = self.mock_get_connection.start()

        self.conn = self.get_connection()
        self.conn.edit_config = MagicMock()

        # Real Cliconf instance so get_diff behaves as on a live connection.
        self.cliconf_obj = Cliconf(MagicMock())
        self.running_config = load_fixture('iosxr_config_config.cfg')

    def tearDown(self):
        super(TestIosxrConfigModule, self).tearDown()
        self.patcher_get_config.stop()
        self.patcher_exec_command.stop()
        self.mock_get_connection.stop()

    def load_fixtures(self, commands=None):
        config_file = 'iosxr_config_config.cfg'
        self.mock_get_config.return_value = load_fixture(config_file)
        self.mock_exec_command.return_value = 'dummy diff'

    def test_iosxr_config_unchanged(self):
        src = load_fixture('iosxr_config_config.cfg')
        set_module_args(dict(src=src))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(src, src))
        self.execute_module()

    def test_iosxr_config_src(self):
        src = load_fixture('iosxr_config_src.cfg')
        set_module_args(dict(src=src))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(src, self.running_config))
        commands = ['hostname foo', 'interface GigabitEthernet0/0',
                    'no ip address']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_config_backup(self):
        set_module_args(dict(backup=True))
        result = self.execute_module()
        self.assertIn('__backup__', result)

    def test_iosxr_config_lines_wo_parents(self):
        lines = ['hostname foo']
        set_module_args(dict(lines=lines))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_config_lines_w_parents(self):
        lines = ['shutdown']
        parents = ['interface GigabitEthernet0/0']
        set_module_args(dict(lines=lines, parents=parents))
        module = MagicMock()
        module.params = {'lines': lines, 'parents': parents, 'src': None}
        candidate_config = iosxr_config.get_candidate(module)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(candidate_config, self.running_config))
        commands = ['interface GigabitEthernet0/0', 'shutdown']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_config_before(self):
        lines = ['hostname foo']
        set_module_args(dict(lines=lines, before=['test1', 'test2']))
        commands = ['test1', 'test2', 'hostname foo']
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_iosxr_config_after(self):
        lines = ['hostname foo']
        set_module_args(dict(lines=lines, after=['test1', 'test2']))
        commands = ['hostname foo', 'test1', 'test2']
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_iosxr_config_before_after_no_change(self):
        lines = ['hostname router']
        set_module_args(dict(lines=lines,
                             before=['test1', 'test2'],
                             after=['test3', 'test4']))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        self.execute_module()

    def test_iosxr_config_config(self):
        config = 'hostname localhost'
        lines = ['hostname router']
        set_module_args(dict(lines=['hostname router'], config=config))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), config))
        commands = ['hostname router']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_config_replace_block(self):
        lines = ['description test string', 'test string']
        parents = ['interface GigabitEthernet0/0']
        set_module_args(dict(lines=lines, replace='block', parents=parents))
        commands = parents + lines
        module = MagicMock()
        module.params = {'lines': lines, 'parents': parents, 'src': None}
        candidate_config = iosxr_config.get_candidate(module)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(candidate_config, self.running_config, diff_replace='block', path=parents))
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_config_force(self):
        lines = ['hostname router']
        set_module_args(dict(lines=lines, force=True))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config, diff_match='none'))
        self.execute_module(changed=True, commands=lines)

    def test_iosxr_config_admin(self):
        lines = ['username admin', 'group root-system', 'secret P@ssw0rd']
        set_module_args(dict(lines=lines, admin=True))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        self.execute_module(changed=True, commands=lines)

    def test_iosxr_config_match_none(self):
        lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string']
        parents = ['interface GigabitEthernet0/0']
        set_module_args(dict(lines=lines, parents=parents, match='none'))
        commands = parents + lines
        module = MagicMock()
        module.params = {'lines': lines, 'parents': parents, 'src': None}
        candidate_config = iosxr_config.get_candidate(module)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(candidate_config, self.running_config, diff_match='none', path=parents))
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_iosxr_config_match_strict(self):
        lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string',
                 'shutdown']
        parents = ['interface GigabitEthernet0/0']
        set_module_args(dict(lines=lines, parents=parents, match='strict'))
        commands = parents + ['shutdown']
        module = MagicMock()
        module.params = {'lines': lines, 'parents': parents, 'src': None}
        candidate_config = iosxr_config.get_candidate(module)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(candidate_config, self.running_config, diff_match='strict', path=parents))
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_iosxr_config_match_exact(self):
        lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string',
                 'shutdown']
        parents = ['interface GigabitEthernet0/0']
        set_module_args(dict(lines=lines, parents=parents, match='exact'))
        commands = parents + lines
        module = MagicMock()
        module.params = {'lines': lines, 'parents': parents, 'src': None}
        candidate_config = iosxr_config.get_candidate(module)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(candidate_config, self.running_config, diff_match='exact', path=parents))
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_iosxr_config_src_and_lines_fails(self):
        args = dict(src='foo', lines='foo')
        set_module_args(args)
        self.execute_module(failed=True)

    def test_iosxr_config_src_and_parents_fails(self):
        args = dict(src='foo', parents='foo')
        set_module_args(args)
        self.execute_module(failed=True)

    def test_iosxr_config_match_exact_requires_lines(self):
        args = dict(match='exact')
        set_module_args(args)
        self.execute_module(failed=True)

    def test_iosxr_config_match_strict_requires_lines(self):
        args = dict(match='strict')
        set_module_args(args)
        self.execute_module(failed=True)

    def test_iosxr_config_replace_block_requires_lines(self):
        args = dict(replace='block')
        set_module_args(args)
        self.execute_module(failed=True)

    def test_iosxr_config_replace_config_requires_src(self):
        args = dict(replace='config')
        set_module_args(args)
        self.execute_module(failed=True)
bzhou26/NRA-Crawler | geopy/geocoders/openmapquest.py | 13 | 4107 | """
:class:`.OpenMapQuest` geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT,
DEFAULT_SCHEME
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("OpenMapQuest", )
class OpenMapQuest(Geocoder): # pylint: disable=W0223
    """
    Geocoder using MapQuest Open Platform Web Services. Documentation at:
    http://developer.mapquest.com/web/products/open/geocoding-service
    """
    def __init__(
            self,
            api_key=None,
            format_string=DEFAULT_FORMAT_STRING,
            scheme=DEFAULT_SCHEME,
            timeout=DEFAULT_TIMEOUT,
            proxies=None,
            user_agent=None,
    ): # pylint: disable=R0913
        """
        Initialize an Open MapQuest geocoder with location-specific
        address information. No API Key is needed by the Nominatim based
        platform.
        :param string api_key: Optional API key; stored but not required by
            the Nominatim-based endpoint (defaults to the empty string).
        :param string format_string: String containing '%s' where
            the string to geocode should be interpolated before querying
            the geocoder. For example: '%s, Mountain View, CA'. The default
            is just '%s'.
        :param string scheme: Use 'https' or 'http' as the API URL's scheme.
            Default is https. Note that SSL connections' certificates are not
            verified.
        .. versionadded:: 0.97
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception.
        .. versionadded:: 0.97
        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.
        .. versionadded:: 0.96
        """
        super(OpenMapQuest, self).__init__(
            format_string, scheme, timeout, proxies, user_agent=user_agent
        )
        self.api_key = api_key or ''
        # Base endpoint; individual query parameters are appended in geocode().
        self.api = "%s://open.mapquestapi.com/nominatim/v1/search" \
            "?format=json" % self.scheme
    def geocode(self, query, exactly_one=True, timeout=None): # pylint: disable=W0221
        """
        Geocode a location query.
        :param string query: The address or query you wish to geocode.
        :param bool exactly_one: Return one result or a list of results, if
            available.
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        .. versionadded:: 0.97
        """
        params = {
            'q': self.format_string % query
        }
        if exactly_one:
            # Ask the service for a single result rather than trimming locally.
            params['maxResults'] = 1
        url = "&".join((self.api, urlencode(params)))
        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        return self._parse_json(
            self._call_geocoder(url, timeout=timeout),
            exactly_one
        )
    @classmethod
    def _parse_json(cls, resources, exactly_one=True):
        """
        Parse display name, latitude, and longitude from an JSON response.
        Returns ``None`` for an empty response, a single
        :class:`geopy.location.Location` when ``exactly_one`` is true,
        otherwise a list of locations.
        """
        if not len(resources): # pragma: no cover
            return None
        if exactly_one:
            return cls.parse_resource(resources[0])
        else:
            return [cls.parse_resource(resource) for resource in resources]
    @classmethod
    def parse_resource(cls, resource):
        """
        Return location and coordinates tuple from dict.
        """
        location = resource['display_name']
        # `or None` converts empty-string coordinates into None.
        latitude = resource['lat'] or None
        longitude = resource['lon'] or None
        if latitude and longitude:
            latitude = float(latitude)
            longitude = float(longitude)
        return Location(location, (latitude, longitude), resource)
| mit |
bdyetton/prettychart | website/project/views/node.py | 1 | 40848 | # -*- coding: utf-8 -*-
import logging
import httplib as http
import math
from itertools import islice
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException, ValidationValueError
from framework import status
from framework.utils import iso8601format
from framework.mongo import StoredObject
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo.utils import from_mongo, get_or_http_error
from website import language
from website.util import paths
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import clean_template_name, new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public,
must_be_contributor,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.util.permissions import ADMIN, READ, WRITE
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, get_pointer_parent, NodeUpdateError
from website.project.forms import NewNodeForm
from website.models import Node, Pointer, WatchConfig, PrivateLink
from website import settings
from website.views import _render_nodes, find_dashboard, validate_page_num
from website.profile import utils
from website.project import new_folder
from website.util.sanitize import strip_html
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
    """Update a single editable field ('title' or 'description') on a node."""
    payload = request.json
    field = payload.get('name')
    new_value = strip_html(payload.get('value', ''))
    if field == 'title':
        try:
            node.set_title(new_value, auth=auth)
        except ValidationValueError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
    elif field == 'description':
        node.set_description(new_value, auth=auth)
    node.save()
    return {'status': 'success'}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
    """Render the 'create new project' page; the template needs no context."""
    return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
    """Create a new top-level project from the posted JSON.

    Accepts ``title``, ``category``, ``description``, and an optional
    ``template`` node id; when a template is given the new project is
    created as a copy of that node. Returns the project URL and the
    serialized node with HTTP 201.
    """
    user = auth.user
    data = request.get_json()
    # User-supplied strings are sanitized before being persisted.
    title = strip_html(data.get('title'))
    title = title.strip()
    category = data.get('category', 'project')
    template = data.get('template')
    description = strip_html(data.get('description'))
    new_project = {}
    if template:
        # Build the new project as a templated copy of the given node,
        # overriding title/category (and description when provided).
        original_node = Node.load(template)
        changes = {
            'title': title,
            'category': category,
            'template_node': original_node,
        }
        if description:
            changes['description'] = description
        project = original_node.use_as_template(
            auth=auth,
            changes={
                template: changes,
            }
        )
    else:
        try:
            project = new_node(category, title, user, description)
        except ValidationValueError as e:
            # Model-level validation failure (e.g. bad title) -> 400.
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
    new_project = _view_project(project, auth)
    return {
        'projectUrl': project.url,
        'newNode': new_project['node'] if new_project else None
    }, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
    """Create a new node templated on ``node`` and return its URL.

    Fix: the local variable previously shadowed the module-level
    ``new_node`` factory imported from ``website.project``.
    """
    templated_node = node.use_as_template(
        auth=auth,
        changes=dict(),
    )
    return {'url': templated_node.url}, http.CREATED, None
##############################################################################
# New Folder
##############################################################################
@must_be_valid_project
@must_be_logged_in
def folder_new_post(auth, node, **kwargs):
    """Create a new folder inside folder ``node`` and link it via a pointer."""
    title = request.json.get('title')
    if not node.is_folder:
        # Folders may only be nested under other folders.
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(strip_html(title), auth.user)
    try:
        _add_pointers(node, [folder], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {
        'projectUrl': '/dashboard/',
    }, http.CREATED
@collect_auth
def add_folder(auth, **kwargs):
    """Create a new folder and point at it from an existing folder node."""
    data = request.get_json()
    parent = get_or_http_error(Node, data.get('node_id'))
    title = strip_html(data.get('title'))
    if not parent.is_folder:
        # Only folders may contain folders.
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(title, auth.user)
    try:
        _add_pointers(parent, [folder], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}, 201, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
    """Create a new component under ``node`` from a submitted form.

    On success, pushes a status message and redirects to the new
    component's page; on validation failure, pushes the form errors and
    redirects back to the parent.
    """
    form = NewNodeForm(request.form)
    user = auth.user
    if form.validate():
        try:
            # Note: rebinds `node` to the newly created child component.
            node = new_node(
                title=strip_html(form.title.data),
                user=user,
                category=form.category.data,
                parent=node,
            )
        except ValidationValueError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        message = (
            'Your component was created successfully. You can keep working on the component page below, '
            'or return to the <u><a href="{url}">Project Page</a></u>.'
        ).format(url=node.url)
        status.push_status_message(message, 'info')
        return {
            'status': 'success',
        }, 201, None, node.url
    else:
        status.push_errors_to_status(form.errors)
        raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
    """Collect warning prompts to show before forking ``node``."""
    prompts = node.callback('before_fork', user=auth.user)
    if node.has_pointers_recursive:
        # Pointers are not copied by a fork; warn the user up front.
        prompts.append(
            language.BEFORE_FORK_HAS_POINTERS.format(
                category=node.project_or_component
            )
        )
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
    """List the full names of node add-ons that will not carry over when
    ``node`` is used as a template.

    Fix: the original serialized each addon with ``to_json`` twice; the
    result is now computed once per addon.
    """
    prompts = []
    for addon in node.get_addons():
        if 'node' in addon.config.configs:
            full_name = addon.to_json(auth.user)['addon_full_name']
            if full_name:
                prompts.append(full_name)
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def node_fork_page(auth, node, **kwargs):
    """Fork ``node`` for the current user and return the fork's URL."""
    # Forking duplicates files, which is not allowed in disk saving mode.
    if settings.DISK_SAVING_MODE:
        raise HTTPError(
            http.METHOD_NOT_ALLOWED,
            redirect_url=node.url
        )
    try:
        fork = node.fork_node(auth)
    except PermissionsError:
        raise HTTPError(
            http.FORBIDDEN,
            redirect_url=node.url
        )
    return fork.url
@must_be_valid_project
@must_be_contributor_or_public
def node_registrations(auth, node, **kwargs):
    """Render the registrations tab for ``node``."""
    return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_contributor_or_public
def node_forks(auth, node, **kwargs):
    """Render the forks tab for ``node``."""
    return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_logged_in
@must_be_contributor
def node_setting(auth, node, **kwargs):
    """Build the context for the node settings page: base project data plus
    enabled/available add-on configs, comment level, and category choices.
    """
    ret = _view_project(node, auth, primary=True)
    addons_enabled = []
    addon_enabled_settings = []
    for addon in node.get_addons():
        addons_enabled.append(addon.config.short_name)
        # Only add-ons that expose a node-level settings view are serialized.
        if 'node' in addon.config.configs:
            config = addon.to_json(auth.user)
            # inject the MakoTemplateLookup into the template context
            # TODO inject only short_name and render fully client side
            config['template_lookup'] = addon.config.template_lookup
            config['addon_icon_url'] = addon.config.icon_url
            addon_enabled_settings.append(config)
    addon_enabled_settings = sorted(addon_enabled_settings, key=lambda addon: addon['addon_full_name'].lower())
    ret['addon_categories'] = settings.ADDON_CATEGORIES
    # Add-ons installable on nodes, minus those the system adds automatically.
    ret['addons_available'] = sorted([
        addon
        for addon in settings.ADDONS_AVAILABLE
        if 'node' in addon.owners
        and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node']
    ], key=lambda addon: addon.full_name.lower())
    ret['addons_enabled'] = addons_enabled
    ret['addon_enabled_settings'] = addon_enabled_settings
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    ret['addon_js'] = collect_node_config_js(node.get_addons())
    ret['comments'] = {
        'level': node.comment_level,
    }
    ret['categories'] = Node.CATEGORY_MAP
    ret['categories'].update({
        'project': 'Project'
    })
    return ret
def collect_node_config_js(addons):
    """Collect the webpack bundle path of each addon's node-cfg.js module.

    :param list addons: List of node's addon config records.
    :return: List of JS module paths to include on the node settings page.
    """
    bundles = []
    for addon in addons:
        bundle = paths.resolve_addon_path(addon.config, 'node-cfg.js')
        # Add-ons without a node settings bundle resolve to a falsy path.
        if bundle:
            bundles.append(bundle)
    return bundles
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
    """Enable/disable node add-ons from the posted JSON configuration."""
    node.config_addons(request.json, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
    """Render the contributors page, including parent-level admins."""
    ret = _view_project(node, auth, primary=True)
    ret.update({
        'contributors': utils.serialize_contributors(node.contributors, node),
        'adminContributors': utils.serialize_contributors(
            node.admin_contributors, node, admin=True),
    })
    return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
    """Set the node's comment level to 'public', 'private', or disable it."""
    level = request.json.get('commentLevel')
    if not level:
        # Missing/empty level disables commenting entirely.
        node.comment_level = None
    elif level in ('public', 'private'):
        node.comment_level = level
    else:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
def view_project(auth, node, **kwargs):
    """Build the full context for the project dashboard page (or its
    JSON payload when requested through /api/v1).
    """
    # API calls come through /api/v1/...; page loads are "primary" renders,
    # which also trigger add-on before_page_load callbacks in _view_project.
    primary = '/api/v1' not in request.path
    ret = _view_project(node, auth, primary=primary)
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    # Collect the URIs to the static assets for addons that have widgets
    ret['addon_widget_js'] = list(collect_addon_js(
        node,
        filename='widget-cfg.js',
        config_entry='widget'
    ))
    ret.update(rubeus.collect_addon_assets(node))
    return ret
# Expand/Collapse
@must_be_valid_project
@must_be_contributor_or_public
def expand(auth, node, **kwargs):
    """Mark ``node`` as expanded in the current user's dashboard view."""
    node.expand(user=auth.user)
    return {}, 200, None
@must_be_valid_project
@must_be_contributor_or_public
def collapse(auth, node, **kwargs):
    """Mark ``node`` as collapsed in the current user's dashboard view."""
    node.collapse(user=auth.user)
    return {}, 200, None
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
    """Reorders the components in a project's component list.
    :param-json list new_list: List of strings that include node IDs and
        node type delimited by ':'.
    :raises: HTTPError (400) when the submitted list does not exactly match
        the project's current set of non-deleted child nodes.
    """
    # TODO(sloria): Change new_list parameter to be an array of objects
    # {
    #   'newList': {
    #       {'key': 'abc123', 'type': 'node'}
    #   }
    # }
    new_list = [
        tuple(n.split(':'))
        for n in request.json.get('new_list', [])
    ]
    # Resolve each (id, schema) pair back into a stored node object.
    nodes_new = [
        StoredObject.get_collection(schema).load(key)
        for key, schema in new_list
    ]
    valid_nodes = [
        n for n in node.nodes
        if not n.is_deleted
    ]
    deleted_nodes = [
        n for n in node.nodes
        if n.is_deleted
    ]
    # The new ordering must be a permutation of the current visible children;
    # deleted children are preserved at the end of the list.
    if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
        node.nodes = nodes_new + deleted_nodes
        node.save()
        return {}
    logger.error('Got invalid node list in reorder components')
    raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
    """Render the statistics page for editors or on public nodes."""
    allowed = node.can_edit(auth) or node.is_public
    if not allowed:
        raise HTTPError(http.FORBIDDEN)
    return _view_project(node, auth, primary=True)
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_before_set_public(node, **kwargs):
    """Collect warning prompts to show before making ``node`` public."""
    prompts = node.callback('before_make_public')
    # Warn when any active view-only link is anonymized: anonymity is lost
    # once the node becomes public.
    if any(link.anonymous for link in node.private_links_active):
        prompts.append('Anonymized view-only links <b>DO NOT</b> anonymize '
                       'contributors after a project or component is made public.')
    return {
        'prompts': prompts
    }
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
    """Set node privacy to the 'permissions' value taken from the URL."""
    permissions = kwargs.get('permissions')
    if permissions is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.set_privacy(permissions, auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': "Can't change privacy",
                'message_long': e.message,
            },
        )
    return {'status': 'success', 'permissions': permissions}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(auth, node, **kwargs):
    """Subscribe the current user to activity on ``node``."""
    user = auth.user
    config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False),
    )
    try:
        user.watch(config)
    except ValueError:
        # Node is already being watched
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched)
    }
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(auth, node, **kwargs):
    """Unsubscribe the current user from activity on ``node``."""
    user = auth.user
    config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False),
    )
    try:
        user.unwatch(config)
    except ValueError:
        # Node isn't being watched
        raise HTTPError(http.BAD_REQUEST)
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched)
    }
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(auth, node, **kwargs):
    '''View for toggling watch mode for a node.'''
    # TODO: refactor this, watch_post, unwatch_post (@mambocab)
    user = auth.user
    watch_config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False)
    )
    try:
        # Flip the watch state: unwatch if watching, watch otherwise.
        if user.is_watching(node):
            user.unwatch(watch_config)
        else:
            user.watch(watch_config)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched),
        'watched': user.is_watching(node)
    }
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    """Apply a partial update to ``node`` from the request JSON and return
    the resulting values of the fields that changed.

    :raises: HTTPError (400) when the model rejects an attribute update.
    """
    # in node.update() method there is a key list node.WRITABLE_WHITELIST only allow user to modify
    # category, title, and description which can be edited by write permission contributor
    try:
        return {
            'updated_fields': {
                key: getattr(node, key)
                for key in
                node.update(request.get_json(), auth=auth)
            }
        }
    except NodeUpdateError as e:
        raise HTTPError(400, data=dict(
            message_short="Failed to update attribute '{0}'".format(e.key),
            message_long=e.reason
        ))
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
    """Remove component, and recursively remove its children. If node has a
    parent, add log and redirect to parent; else redirect to user dashboard.

    :raises: HTTPError (400) when the node cannot be deleted in its
        current state.
    """
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Error',
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    node.save()
    message = '{} deleted'.format(
        node.project_or_component.capitalize()
    )
    status.push_status_message(message, 'success')
    parent = node.parent_node
    if parent and parent.can_view(auth):
        # NOTE(review): redirects via node.node__parent[0] rather than the
        # `parent` object above — presumably equivalent; confirm.
        redirect_url = node.node__parent[0].url
    else:
        redirect_url = '/dashboard/'
    return {
        'url': redirect_url,
    }
@must_have_permission(ADMIN)
@must_not_be_registration
def delete_folder(auth, node, **kwargs):
    """Remove folder node

    Only non-dashboard folder nodes may be deleted; deletion recursively
    removes the folder via ``remove_node``.
    """
    if node is None:
        raise HTTPError(http.BAD_REQUEST)
    # The dashboard folder is a fixed system folder and cannot be removed.
    if not node.is_folder or node.is_dashboard:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Error',
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    return {}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
    """Soft-delete a view-only link identified by the posted
    'private_link_id'.

    Fix: ``PrivateLink.load`` returns None for an unknown id, which
    previously escaped as an AttributeError (HTTP 500) instead of the
    intended 404.
    """
    link_id = request.json['private_link_id']
    try:
        link = PrivateLink.load(link_id)
    except ModularOdmException:
        link = None
    if link is None:
        raise HTTPError(http.NOT_FOUND)
    link.is_deleted = True
    link.save()
# TODO: Split into separate functions
def _render_addon(node):
    """Collect per-addon serialized configs and static assets for ``node``.

    :return: Tuple of (widgets, configs, js, css); ``widgets`` is currently
        always empty.
    """
    widgets = {}
    configs = {}
    js = []
    css = []
    for addon in node.get_addons():
        configs[addon.config.short_name] = addon.config.to_json()
        # Gather widget assets first, then file-view assets, per addon.
        for section in ('widget', 'files'):
            js.extend(addon.config.include_js.get(section, []))
            css.extend(addon.config.include_css.get(section, []))
    return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
    """Decide whether the wiki widget should be shown to ``user``.

    Writers see the widget whenever the wiki add-on is enabled; read-only
    visitors only see it when the home wiki page has rendered content.
    """
    has_wiki = bool(node.get_addon('wiki'))
    wiki_page = node.get_wiki_page('home', None)
    if node.has_permission(user, 'write'):
        return has_wiki
    return has_wiki and wiki_page and wiki_page.html(node)
def _view_project(node, auth, primary=False):
    """Build a JSON object containing everything needed to render
    project.view.mako.

    :param Node node: Node being rendered.
    :param auth: Auth context for the current request.
    :param bool primary: True for a full page render; triggers add-on
        before_page_load callbacks and their status messages.
    :return: Dict with 'node', 'parent_node', 'user', 'badges', and
        add-on asset keys.
    """
    user = auth.user
    parent = node.parent_node
    if user:
        dashboard = find_dashboard(user)
        dashboard_id = dashboard._id
        in_dashboard = dashboard.pointing_at(node._primary_key) is not None
    else:
        in_dashboard = False
        dashboard_id = ''
    # View-only key may come from the auth context or the query string.
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    anonymous = has_anonymous_link(node, auth)
    widgets, configs, js, css = _render_addon(node)
    redirect_url = node.url + '?view_only=None'
    # Before page load callback; skip if not primary call
    if primary:
        for addon in node.get_addons():
            messages = addon.before_page_load(node, user) or []
            for message in messages:
                status.push_status_message(message, 'info', dismissible=False)
    data = {
        'node': {
            'id': node._primary_key,
            'title': node.title,
            'category': node.category_display,
            'category_short': node.category,
            'node_type': node.project_or_component,
            'description': node.description or '',
            'url': node.url,
            'api_url': node.api_url,
            'absolute_url': node.absolute_url,
            'redirect_url': redirect_url,
            'display_absolute_url': node.display_absolute_url,
            'update_url': node.api_url_for('update_node'),
            'in_dashboard': in_dashboard,
            'is_public': node.is_public,
            'is_archiving': node.archiving,
            'date_created': iso8601format(node.date_created),
            'date_modified': iso8601format(node.logs[-1].date) if node.logs else '',
            'tags': [tag._primary_key for tag in node.tags],
            'children': bool(node.nodes),
            'is_registration': node.is_registration,
            'is_retracted': node.is_retracted,
            'pending_retraction': node.pending_retraction,
            'retracted_justification': getattr(node.retraction, 'justification', None),
            'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
            'pending_embargo': node.pending_embargo,
            'registered_from_url': node.registered_from.url if node.is_registration else '',
            'registered_date': iso8601format(node.registered_date) if node.is_registration else '',
            'root_id': node.root._id,
            'registered_meta': [
                {
                    'name_no_ext': from_mongo(meta),
                    'name_clean': clean_template_name(meta),
                }
                for meta in node.registered_meta or []
            ],
            'registration_count': len(node.node__registrations),
            'is_fork': node.is_fork,
            'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
            'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
            'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
            'fork_count': len(node.forks),
            'templated_count': len(node.templated_list),
            'watched_count': len(node.watchconfig__watched),
            'private_links': [x.to_json() for x in node.private_links_active],
            'link': view_only_link,
            'anonymous': anonymous,
            'points': len(node.get_points(deleted=False, folders=False)),
            'piwik_site_id': node.piwik_site_id,
            'comment_level': node.comment_level,
            'has_comments': bool(getattr(node, 'commented', [])),
            # NOTE(review): 'has_children' reads the same 'commented' attribute
            # as 'has_comments' — looks like a copy-paste slip; confirm intent.
            'has_children': bool(getattr(node, 'commented', False)),
            'identifiers': {
                'doi': node.get_identifier_value('doi'),
                'ark': node.get_identifier_value('ark'),
            },
        },
        'parent_node': {
            'exists': parent is not None,
            'id': parent._primary_key if parent else '',
            'title': parent.title if parent else '',
            'category': parent.category_display if parent else '',
            'url': parent.url if parent else '',
            'api_url': parent.api_url if parent else '',
            'absolute_url': parent.absolute_url if parent else '',
            'registrations_url': parent.web_url_for('node_registrations') if parent else '',
            'is_public': parent.is_public if parent else '',
            'is_contributor': parent.is_contributor(user) if parent else '',
            'can_view': parent.can_view(auth) if parent else False
        },
        'user': {
            'is_contributor': node.is_contributor(user),
            'is_admin_parent': parent.is_admin_parent(user) if parent else False,
            'can_edit': (node.can_edit(auth)
                         and not node.is_registration),
            'has_read_permissions': node.has_permission(user, 'read'),
            'permissions': node.get_permissions(user) if user else [],
            'is_watching': user.is_watching(node) if user else False,
            'piwik_token': user.piwik_token if user else '',
            'id': user._id if user else None,
            'username': user.username if user else None,
            'fullname': user.fullname if user else '',
            'can_comment': node.can_comment(auth),
            'show_wiki_widget': _should_show_wiki_widget(node, user),
            'dashboard_id': dashboard_id,
        },
        'badges': _get_badge(user),
        # TODO: Namespace with nested dicts
        'addons_enabled': node.get_addon_names(),
        'addons': configs,
        'addon_widgets': widgets,
        'addon_widget_js': js,
        'addon_widget_css': css,
        'node_categories': Node.CATEGORY_MAP,
    }
    return data
def _get_badge(user):
    """Serialize the user's badges add-on, or return {} when unavailable."""
    if not user:
        return {}
    badger = user.get_addon('badges')
    if not badger:
        return {}
    return {
        'can_award': badger.can_award,
        'badges': badger.get_badges_json()
    }
def _get_children(node, auth, indent=0):
    """Recursively serialize the primary children of ``node`` that the
    requester may edit, depth-first with an indent level per generation.
    """
    serialized = []
    for child in node.nodes_primary:
        if child.is_deleted or not child.can_edit(auth):
            # Skipping a node also prunes its entire subtree.
            continue
        serialized.append({
            'id': child._primary_key,
            'title': child.title,
            'indent': indent,
            'is_public': child.is_public,
            'parent_id': child.parent_id,
        })
        serialized.extend(_get_children(child, auth, indent + 1))
    return serialized
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
    """Serialize the node's active view-only links for the settings table."""
    return {
        'node': {
            'absolute_url': node.absolute_url,
            'private_links': [link.to_json() for link in node.private_links_active],
        }
    }
@collect_auth
@must_be_valid_project
def get_editable_children(auth, node, **kwargs):
    """Return ``node`` plus its recursive children editable by the requester."""
    if not node.can_edit(auth):
        return
    return {
        'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
        'children': _get_children(node, auth),
    }
def _get_user_activity(node, auth, rescale_ratio):
    """Compute activity-bar data for ``node``.

    :return: Tuple (ua_count, ua, non_ua) where ``ua`` and ``non_ua`` are
        the user's and other contributors' log counts scaled against
        ``rescale_ratio`` (as percentages).
    """
    total_count = len(node.logs)
    # Note: It's typically much faster to find logs of a given node
    # attached to a given user using node.logs.find(...) than by
    # loading the logs into Python and checking each one. However,
    # using deep caching might be even faster down the road.
    if auth.user:
        ua_count = node.logs.find(Q('user', 'eq', auth.user)).count()
    else:
        ua_count = 0
    non_ua_count = total_count - ua_count  # base length of blue bar
    def _scaled(count):
        # Normalize over all nodes; a zero ratio means no activity anywhere.
        try:
            return count / rescale_ratio * 100
        except ZeroDivisionError:
            return 0
    return ua_count, _scaled(ua_count), _scaled(non_ua_count)
@must_be_valid_project
def get_recent_logs(node, **kwargs):
    """Return the primary keys of the node's three most recent log entries."""
    recent = list(reversed(node.logs._to_primary_keys()))[:3]
    return {'logs': recent}
def _get_summary(node, auth, rescale_ratio, primary=True, link_id=None, show_path=False):
    """Serialize ``node`` for a summary card/listing.

    Viewers who cannot see the node only get visibility/registration flags
    with ``can_view`` False; activity-bar numbers are included only when
    ``rescale_ratio`` is truthy.
    """
    # TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
    summary = {
        'id': link_id if link_id else node._id,
        'primary': primary,
        'is_registration': node.is_registration,
        'is_fork': node.is_fork,
        'is_retracted': node.is_retracted,
        'pending_retraction': node.pending_retraction,
        'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
        'pending_embargo': node.pending_embargo,
        'archiving': node.archiving,
    }
    if node.can_view(auth):
        summary.update({
            'can_view': True,
            'can_edit': node.can_edit(auth),
            'primary_id': node._id,
            'url': node.url,
            'primary': primary,
            'api_url': node.api_url,
            'title': node.title,
            'category': node.category,
            'node_type': node.project_or_component,
            'is_registration': node.is_registration,
            'anonymous': has_anonymous_link(node, auth),
            'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
            if node.is_registration
            else None,
            'nlogs': None,
            'ua_count': None,
            'ua': None,
            'non_ua': None,
            'addons_enabled': node.get_addon_names(),
            'is_public': node.is_public,
            'parent_title': node.parent_node.title if node.parent_node else None,
            'parent_is_public': node.parent_node.is_public if node.parent_node else False,
            'show_path': show_path
        })
        # Activity-bar numbers are only meaningful with a rescale ratio.
        if rescale_ratio:
            ua_count, ua, non_ua = _get_user_activity(node, auth, rescale_ratio)
            summary.update({
                'nlogs': len(node.logs),
                'ua_count': ua_count,
                'ua': ua,
                'non_ua': non_ua,
            })
    else:
        summary['can_view'] = False
    # TODO: Make output format consistent with _view_project
    return {
        'summary': summary,
    }
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_summary(auth, node, **kwargs):
    """Return the summary-card serialization of ``node``."""
    rescale_ratio = kwargs.get('rescale_ratio')
    # Fall back to the query string when the ratio is not routed via kwargs.
    if rescale_ratio is None and request.args.get('rescale_ratio'):
        try:
            rescale_ratio = float(request.args.get('rescale_ratio'))
        except (TypeError, ValueError):
            raise HTTPError(http.BAD_REQUEST)
    return _get_summary(
        node, auth, rescale_ratio,
        primary=kwargs.get('primary'),
        link_id=kwargs.get('link_id'),
        show_path=kwargs.get('show_path', False),
    )
@must_be_contributor_or_public
def get_children(auth, node, **kwargs):
    """Render the node's non-deleted children, optionally filtered to those
    on which the requester holds the queried permission.
    """
    if request.args.get('permissions'):
        perm = request.args['permissions'].lower().strip()
        visible = [
            child for child in node.nodes
            if perm in child.get_permissions(auth.user) and not child.is_deleted
        ]
    else:
        visible = [child for child in node.nodes if not child.is_deleted]
    return _render_nodes(visible, auth)
@must_be_contributor_or_public
def get_folder_pointers(auth, node, **kwargs):
    """Return the ids of the nodes pointed at by folder ``node``."""
    if not node.is_folder:
        return []
    return [
        each.resolve()._id
        for each in node.nodes
        if each is not None and not each.is_deleted and not each.primary
    ]
@must_be_contributor_or_public
def get_forks(auth, node, **kwargs):
    """Render the node's forks."""
    return _render_nodes(nodes=node.forks, auth=auth)
@must_be_contributor_or_public
def get_registrations(auth, node, **kwargs):
    """Render all non-deleted registrations of ``node``, including those
    still archiving."""
    registrations = [
        each for each in node.node__registrations
        if not each.is_deleted
    ]
    return _render_nodes(registrations, auth)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
    """Create a new private link object and add it to the node and its selected children.

    Warns when an anonymized link is attached to any public node, since
    anonymization does not apply there.
    """
    node_ids = request.json.get('node_ids', [])
    name = request.json.get('name', '')
    anonymous = request.json.get('anonymous', False)
    # The link always covers the current node, listed first.
    if node._id not in node_ids:
        node_ids.insert(0, node._id)
    nodes = [Node.load(node_id) for node_id in node_ids]
    has_public_node = any(node.is_public for node in nodes)
    new_link = new_private_link(
        name=name, user=auth.user, nodes=nodes, anonymous=anonymous
    )
    if anonymous and has_public_node:
        status.push_status_message(
            'Anonymized view-only links <b>DO NOT</b> '
            'anonymize contributors of public project or component.'
        )
    return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
    """Rename a view-only link; silently no-ops when the link is missing."""
    name = request.json.get('value', '')
    link = PrivateLink.load(request.json.get('pk', ''))
    if link:
        link.name = name
        link.save()
def _serialize_node_search(node):
    """Serialize a node for use in pointer search.

    :param Node node: Node to serialize
    :return: Dictionary of node data
    """
    title = node.title
    if node.is_registration:
        title += ' (registration)'
    lead = node.visible_contributors[0]
    # Prefer family name, falling back to given then full name.
    author = lead.family_name or lead.given_name or lead.full_name
    return {
        'id': node._id,
        'title': title,
        'firstAuthor': author,
        'etal': len(node.visible_contributors) > 1,
    }
@must_be_logged_in
def search_node(auth, **kwargs):
    """Search nodes by title for the add-pointer widget.

    Returns one page of serialized matches visible to the current user
    (optionally including public nodes), excluding the requesting node and
    its existing children.

    Fix: ``islice`` requires integer bounds; ``size`` is parsed as a float
    (kept for true division in the ``pages`` computation), so the slice
    bounds are now cast to int instead of raising ValueError.
    """
    # Get arguments
    node = Node.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()
    start = int(page * size)
    if not query:
        return {'nodes': []}
    # Build ODM query
    title_query = Q('title', 'icontains', query)
    not_deleted_query = Q('is_deleted', 'eq', False)
    visibility_query = Q('contributors', 'eq', auth.user)
    no_folders_query = Q('is_folder', 'eq', False)
    if include_public:
        visibility_query = visibility_query | Q('is_public', 'eq', True)
    odm_query = title_query & not_deleted_query & visibility_query & no_folders_query
    # Exclude current node from query if provided
    if node:
        nin = [node._id] + node.node_ids
        odm_query = (
            odm_query &
            Q('_id', 'nin', nin)
        )
    nodes = Node.find(odm_query)
    count = nodes.count()
    # `size` stays a float so this division is true division under Python 2.
    pages = math.ceil(count / size)
    validate_page_num(page, pages)
    return {
        'nodes': [
            _serialize_node_search(each)
            for each in islice(nodes, start, start + int(size))
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def move_pointers(auth):
    """Move pointers from one node to another node.

    JSON body parameters: ``fromNodeId``, ``toNodeId`` and
    ``pointerIds`` (ids of the *pointed-at* nodes).  Each pointer is
    removed from the source node and re-created on the target node.
    Raises 400 on any missing parameter or failed lookup.
    """
    from_node_id = request.json.get('fromNodeId')
    to_node_id = request.json.get('toNodeId')
    pointers_to_move = request.json.get('pointerIds')
    if from_node_id is None or to_node_id is None or pointers_to_move is None:
        raise HTTPError(http.BAD_REQUEST)
    from_node = Node.load(from_node_id)
    to_node = Node.load(to_node_id)
    if to_node is None or from_node is None:
        raise HTTPError(http.BAD_REQUEST)
    for pointer_to_move in pointers_to_move:
        # Translate the pointed-at node id into the Pointer record id.
        pointer_id = from_node.pointing_at(pointer_to_move)
        pointer_node = Node.load(pointer_to_move)
        pointer = Pointer.load(pointer_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            from_node.rm_pointer(pointer, auth=auth)
        except ValueError:
            # rm_pointer raises ValueError when the pointer is absent.
            raise HTTPError(http.BAD_REQUEST)
        from_node.save()
        try:
            _add_pointers(to_node, [pointer_node], auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
    return {}, 200, None
@collect_auth
def add_pointer(auth):
    """Add a single pointer to a node, using only JSON parameters.

    Expects ``toNodeID`` (target node) and ``pointerID`` (node to be
    pointed at) in the request body; raises 400 when either is missing
    or when adding the pointer fails.
    """
    target_id = request.json.get('toNodeID')
    pointed_id = request.json.get('pointerID')
    if not target_id or not pointed_id:
        raise HTTPError(http.BAD_REQUEST)
    pointed = Node.load(pointed_id)
    target = Node.load(target_id)
    try:
        _add_pointers(target, [pointed], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
    """Add one pointer to ``node`` per id in the ``nodeIds`` JSON list.

    Raises 400 when no ids are supplied or when adding fails.
    """
    requested_ids = request.json.get('nodeIds')
    if not requested_ids:
        raise HTTPError(http.BAD_REQUEST)
    to_add = [Node.load(each) for each in requested_ids]
    try:
        _add_pointers(node, to_add, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    # TODO: since this is a delete request, it shouldn't use the request
    # body; put the pointer id in the URL instead
    pointer_id = request.json.get('pointerId')
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    pointer = Pointer.load(pointer_id)
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        # rm_pointer raises ValueError when the pointer is not in node.nodes.
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer_from_folder(auth, node, pointer_id, **kwargs):
    """Remove a pointer from a folder node.

    ``pointer_id`` is the id of the *pointed-at* node; it is translated
    into the Pointer record via ``node.pointing_at``.  Raises 400 when
    the id is missing, the pointer cannot be found, or the pointer is
    not in ``node.nodes``.
    """
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    pointer = Pointer.load(node.pointing_at(pointer_id))
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        # rm_pointer raises ValueError when the pointer is absent.
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointers_from_folder(auth, node, **kwargs):
    """Remove multiple pointers from a node, raising a 400 if the pointer is not
    in `node.nodes`.

    ``pointerIds`` in the JSON body holds ids of the *pointed-at*
    nodes, which are translated via ``node.pointing_at``.  The node is
    saved once, after all removals succeed.
    """
    pointer_ids = request.json.get('pointerIds')
    if pointer_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    for pointer_id in pointer_ids:
        # Translate the pointed-at node id into the Pointer record id.
        pointer_id = node.pointing_at(pointer_id)
        pointer = Pointer.load(pointer_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            node.rm_pointer(pointer, auth=auth)
        except ValueError:
            # rm_pointer raises ValueError when the pointer is absent.
            raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
    """Fork a pointer. Raises BAD_REQUEST if the ``pointerId`` JSON
    parameter is missing, does not resolve to a Pointer, or is not
    present in ``node.nodes``.
    """
    requested = request.json.get('pointerId')
    target = Pointer.load(requested)
    if target is None:
        # TODO: Change this to 404?
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.fork_pointer(target, auth=auth, save=True)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
    """Return a short author string for ``node``: the lead visible
    contributor's shortest available name, followed by ' et al.' when
    there is more than one visible contributor.
    """
    lead = node.visible_contributors[0]
    short = lead.family_name or lead.given_name or lead.fullname
    if len(node.visible_contributor_ids) > 1:
        return short + ' et al.'
    return short
def serialize_pointer(pointer, auth):
    """Serialize one pointer for display.

    Components the viewer cannot see are masked with a placeholder
    entry instead of exposing their title and authors.
    """
    target = get_pointer_parent(pointer)
    if not target.can_view(auth):
        return {
            'url': None,
            'title': 'Private Component',
            'authorShort': 'Private Author(s)',
        }
    return {
        'id': target._id,
        'url': target.url,
        'title': target.title,
        'authorShort': abbrev_authors(target),
    }
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
    """View that returns the pointers for a project.

    :return: dict with key ``pointed`` holding serialized pointers;
        pointers whose parent is a folder are excluded.
    """
    # exclude folders
    return {'pointed': [
        serialize_pointer(each, auth)
        for each in node.pointed
        if not get_pointer_parent(each).is_folder
    ]}
| apache-2.0 |
FCP-INDI/nipype | nipype/utils/onetime.py | 10 | 2622 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Descriptor support for NIPY.
Utilities to support special Python descriptors [1,2], in particular the use of
a useful pattern for properties we call 'one time properties'. These are
object attributes which are declared as properties, but become regular
attributes once they've been read the first time. They can thus be evaluated
later in the object's life cycle, but once evaluated they become normal, static
attributes with no function call overhead on access or any other constraints.
References
----------
[1] How-To Guide for Descriptors, Raymond
Hettinger. http://users.rcn.com/python/download/Descriptor.htm
[2] Python data model, http://docs.python.org/reference/datamodel.html
"""
from builtins import object
class OneTimeProperty(object):
    """A descriptor to make special properties that become normal attributes.
    """
    def __init__(self, func):
        """Create a OneTimeProperty instance.

        Parameters
        ----------
        func : method
          The method that will be called the first time to compute a value.
          Afterwards, the method's name will be a standard attribute holding
          the value of this computation.
        """
        self.getter = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        """Compute on first access, then shadow with a plain attribute."""
        if obj is None:
            # Class-level access: expose the raw function so that
            # introspection on the class keeps working.
            return self.getter
        value = self.getter(obj)
        # Writing the result into the instance __dict__ shadows this
        # (non-data) descriptor, so later accesses are plain lookups.
        setattr(obj, self.name, value)
        return value
def setattr_on_read(func):
    # XXX - better names for this?
    # - cor_property (copy on read property)
    # - sor_property (set on read property)
    # - prop2attr_on_read
    # ... ?
    """Decorator to create OneTimeProperty attributes.

    Parameters
    ----------
    func : method
      The method that will be called the first time to compute a value.
      Afterwards, the method's name will be a standard attribute holding the
      value of this computation.

    Examples
    --------
    >>> class MagicProp(object):
    ...     @setattr_on_read
    ...     def a(self):
    ...         return 99
    ...
    >>> x = MagicProp()
    >>> 'a' in x.__dict__
    False
    >>> x.a
    99
    >>> 'a' in x.__dict__
    True
    """
    return OneTimeProperty(func)
| bsd-3-clause |
danmar/cppcheck | test/bug-hunting/cve.py | 2 | 2063 | # Test if --bug-hunting works using cve tests
import glob
import logging
import os
import sys
import subprocess
# Resolve tool and test-suite paths depending on whether the script is
# invoked from the repository root or from inside test/bug-hunting/.
if sys.argv[0] in ('test/bug-hunting/cve.py', './test/bug-hunting/cve.py'):
    CPPCHECK_PATH = './cppcheck'
    TEST_SUITE = 'test/bug-hunting/cve'
else:
    CPPCHECK_PATH = '../../cppcheck'
    TEST_SUITE = 'cve'
# With --slow, also run the test folders listed in SLOW further down.
slow = '--slow' in sys.argv
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S')
def test(test_folder):
    """Run cppcheck with --bug-hunting on one CVE test folder and check
    that every line of its expected.txt appears in the stderr output.

    Exits the whole process with status 1 on the first missing result.
    """
    logging.info(test_folder)
    cmd_file = os.path.join(test_folder, 'cmd.txt')
    expected_file = os.path.join(test_folder, 'expected.txt')
    cmd = ['nice',
           CPPCHECK_PATH,
           '-D__GNUC__',
           '--bug-hunting',
           '--inconclusive',
           '--platform=unix64',
           '--template={file}:{line}:{id}',
           '-rp=' + test_folder]
    if os.path.isfile(cmd_file):
        # cmd.txt holds extra command-line options, one per line.
        for line in open(cmd_file, 'rt'):
            if len(line) > 1:
                cmd.append(line.strip())
    cmd.append(test_folder)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    comm = p.communicate()
    stdout = comm[0].decode(encoding='utf-8', errors='ignore')
    stderr = comm[1].decode(encoding='utf-8', errors='ignore')
    with open(expected_file, 'rt') as f:
        for expected in f.readlines():
            if expected.strip() not in stderr.split('\n'):
                print('FAILED. Expected result not found: ' + expected)
                print('Command:')
                print(' '.join(cmd))
                print('Output:')
                print(stderr)
                sys.exit(1)
# Single-folder mode: run exactly the one folder given on the command line.
if (slow is False) and len(sys.argv) > 1:
    test(sys.argv[1])
    sys.exit(0)
# Folders known to take a long time; only run when --slow was passed.
SLOW = []
for test_folder in sorted(glob.glob(TEST_SUITE + '/CVE*')):
    if slow is False:
        check = False
        for s in SLOW:
            if s in test_folder:
                check = True
        if check is True:
            logging.info('skipping %s', test_folder)
            continue
    test(test_folder)
| gpl-3.0 |
qedsoftware/commcare-hq | corehq/apps/sms/migrations/0016_add_phonenumber.py | 1 | 1393 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import dimagi.utils.couch.migration
class Migration(migrations.Migration):
    """Create a fresh PhoneNumber model; the previous migration renamed
    the old model of the same name to PhoneBlacklist."""
    dependencies = [
        ('sms', '0015_rename_phonenumber_to_phoneblacklist'),
    ]
    operations = [
        migrations.CreateModel(
            name='PhoneNumber',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('couch_id', models.CharField(max_length=126, null=True, db_index=True)),
                ('domain', models.CharField(max_length=126, null=True, db_index=True)),
                ('owner_doc_type', models.CharField(max_length=126, null=True)),
                ('owner_id', models.CharField(max_length=126, null=True, db_index=True)),
                ('phone_number', models.CharField(max_length=126, null=True, db_index=True)),
                ('backend_id', models.CharField(max_length=126, null=True)),
                ('ivr_backend_id', models.CharField(max_length=126, null=True)),
                ('verified', models.NullBooleanField(default=False)),
                ('contact_last_modified', models.DateTimeField(null=True)),
            ],
            options={
            },
            # SyncSQLToCouchMixin mirrors writes back to the couch document.
            bases=(dimagi.utils.couch.migration.SyncSQLToCouchMixin, models.Model),
        ),
    ]
| bsd-3-clause |
chriscauley/django-shop | setup.py | 13 | 1131 | from setuptools import setup, find_packages
import os
import shop
# Trove classifiers describing the package on PyPI.
CLASSIFIERS = [
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
# Package metadata: the version comes from the shop package itself and
# the long description from the adjacent README.rst.
setup(
    author="Christopher Glass",
    author_email="tribaal@gmail.com",
    name='django-shop',
    version=shop.__version__,
    description='An Advanced Django Shop',
    long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
    url='http://www.django-shop.org/',
    license='BSD License',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    install_requires=[
        'Django>=1.4',
        'django-classy-tags>=0.3.3',
        'django-polymorphic>=0.2',
        'south>=0.7.2',
        'jsonfield>=0.9.6'
    ],
    packages=find_packages(exclude=["example", "example.*"]),
    include_package_data=True,
    zip_safe=False,
)
| bsd-3-clause |
jvanz/core | wizards/com/sun/star/wizards/web/export/ConfiguredExporter.py | 9 | 1837 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from ..data.CGArgument import CGArgument
from ..data.CGExporter import CGExporter
from .FilterExporter import FilterExporter
class ConfiguredExporter(FilterExporter):
    """Exporter whose extra filter arguments come from configuration.

    Every child of ``exporter.cp_Arguments`` except the ``Filter`` entry
    is copied into the filter property bag, with its string value decoded
    by :meth:`cast` according to a one-character type prefix.
    """
    # (non-Javadoc)
    # @see com.sun.star.wizards.web.export.Exporter#init(com.sun.star.wizards.web.data.CGExporter)
    def __init__(self, exporter):
        """Collect configured argument values into ``self.props``.

        :param exporter: CGExporter configuration node whose
            ``cp_Arguments`` children describe filter properties.
        """
        # BUG FIX: this was ``def __init(exporter)`` -- a typo that neither
        # overrode ``__init__`` nor accepted ``self``, so the configured
        # arguments were never applied.
        super(ConfiguredExporter, self).__init__(exporter)
        for key in exporter.cp_Arguments.childrenMap.keys():
            if (not key == "Filter"):
                value = exporter.cp_Arguments.getElement(key)
                self.props[key] = self.cast(value.cp_Value)
    @staticmethod
    def cast(s):
        """Decode a configured value string.

        The first character selects the type: '$' string, '%' or '#'
        integer, '&' float; the full literals 'false'/'true' map to
        booleans.  Returns None for anything unrecognized.
        """
        # BUG FIX: was an instance method without ``self`` (so the call
        # ``self.cast(x)`` passed the instance as the string) and used
        # ``s[1]`` (a single character) instead of the remainder ``s[1:]``.
        rest = s[1:]
        c = s[0]
        if (c == "$"):
            return rest
        elif (c == "%"):
            return int(rest)
        elif (c == "#"):
            return int(rest)
        elif (c == "&"):
            return float(rest)
        elif (c == "f"):
            if (s == "false"):
                return False
        elif (c == "t"):
            if (s == "true"):
                return True
        return None
| gpl-3.0 |
alberto-antonietti/nest-simulator | pynest/examples/Potjans_2014/stimulus_params.py | 19 | 2283 | # -*- coding: utf-8 -*-
#
# stimulus_params.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
""" PyNEST Microcircuit: Stimulus Parameters
-----------------------------------------------
A dictionary with parameters for an optional external transient stimulation.
Thalamic input and DC input can be switched on individually.
"""
import numpy as np
# All stimulation is optional; 'thalamic_input' and 'dc_input' switch
# the two stimulus types on independently of each other.
stim_dict = {
    # optional thalamic input
    # turn thalamic input on or off (True or False)
    'thalamic_input': False,
    # start of the thalamic input (in ms)
    'th_start': 700.0,
    # duration of the thalamic input (in ms)
    'th_duration': 10.0,
    # rate of the thalamic input (in spikes/s)
    'th_rate': 120.0,
    # number of thalamic neurons
    'num_th_neurons': 902,
    # connection probabilities of the thalamus to the different populations
    # (same order as in 'populations' in 'net_dict')
    'conn_probs_th':
        np.array([0.0, 0.0, 0.0983, 0.0619, 0.0, 0.0, 0.0512, 0.0196]),
    # mean amplitude of the thalamic postsynaptic potential (in mV),
    # standard deviation will be taken from 'net_dict'
    'PSP_th': 0.15,
    # mean delay of the thalamic input (in ms)
    'delay_th_mean': 1.5,
    # relative standard deviation of the thalamic delay (in ms)
    'delay_th_rel_std': 0.5,
    # optional DC input
    # turn DC input on or off (True or False)
    'dc_input': False,
    # start of the DC input (in ms)
    'dc_start': 650.0,
    # duration of the DC input (in ms)
    'dc_dur': 100.0,
    # amplitude of the DC input (in pA); final amplitude is population-specific
    # and will be obtained by multiplication with 'K_ext'
    'dc_amp': 0.3}
| gpl-2.0 |
pypa/pip | tests/functional/test_build_env.py | 4 | 7568 | from textwrap import dedent
import pytest
from pip._internal.build_env import BuildEnvironment
from tests.lib import create_basic_wheel_for_package, make_test_finder
def indent(text, prefix):
    """Prefix every non-empty line of ``text`` with ``prefix``.

    Empty lines are left unprefixed so no trailing whitespace is added.
    """
    lines = text.split('\n')
    prefixed = [prefix + line if line else line for line in lines]
    return '\n'.join(prefixed)
def run_with_build_env(script, setup_script_contents,
                       test_script_contents=None):
    """Write and execute a throwaway script that constructs a
    BuildEnvironment, runs ``setup_script_contents`` against it and,
    when ``test_script_contents`` is given, executes that test script
    inside the activated build environment.

    :return: the result of ``script.run(...)``.
    """
    build_env_script = script.scratch_path / 'build_env.py'
    # The generated script: boilerplate finder setup, then the caller's
    # setup code (re-indented), then an optional sub-process launch.
    build_env_script.write_text(
        dedent(
            '''
            import subprocess
            import sys
            from pip._internal.build_env import BuildEnvironment
            from pip._internal.index.collector import LinkCollector
            from pip._internal.index.package_finder import PackageFinder
            from pip._internal.models.search_scope import SearchScope
            from pip._internal.models.selection_prefs import (
                SelectionPreferences
            )
            from pip._internal.network.session import PipSession
            from pip._internal.utils.temp_dir import global_tempdir_manager
            link_collector = LinkCollector(
                session=PipSession(),
                search_scope=SearchScope.create([{scratch!r}], []),
            )
            selection_prefs = SelectionPreferences(
                allow_yanked=True,
            )
            finder = PackageFinder.create(
                link_collector=link_collector,
                selection_prefs=selection_prefs,
            )
            with global_tempdir_manager():
                build_env = BuildEnvironment()
            '''.format(scratch=str(script.scratch_path))) +
        indent(dedent(setup_script_contents), ' ') +
        indent(
            dedent(
                '''
                if len(sys.argv) > 1:
                    with build_env:
                        subprocess.check_call((sys.executable, sys.argv[1]))
                '''
            ),
            ' '
        )
    )
    args = ['python', build_env_script]
    if test_script_contents is not None:
        test_script = script.scratch_path / 'test.py'
        test_script.write_text(dedent(test_script_contents))
        args.append(test_script)
    return script.run(*args)
def test_build_env_allow_empty_requirements_install():
    """Installing an empty requirement list must be a no-op for both
    prefixes rather than an error."""
    env = BuildEnvironment()
    env.install_requirements(None, [], 'normal', None)
    env.install_requirements(None, [], 'overlay', None)
def test_build_env_allow_only_one_install(script):
    """Each prefix accepts exactly one install_requirements call; any
    further call (even with an empty list) must assert."""
    create_basic_wheel_for_package(script, 'foo', '1.0')
    create_basic_wheel_for_package(script, 'bar', '1.0')
    finder = make_test_finder(find_links=[script.scratch_path])
    build_env = BuildEnvironment()
    for prefix in ('normal', 'overlay'):
        build_env.install_requirements(
            finder, ['foo'], prefix,
            f'installing foo in {prefix}')
        with pytest.raises(AssertionError):
            build_env.install_requirements(
                finder, ['bar'], prefix,
                f'installing bar in {prefix}')
        with pytest.raises(AssertionError):
            build_env.install_requirements(
                finder, [], prefix,
                f'installing in {prefix}')
def test_build_env_requirements_check(script):
    """check_requirements must report (conflicting, missing) correctly:
    before any install everything is missing; after installs into the
    normal/overlay prefixes, satisfied specs disappear and mismatching
    installed versions show up as conflicts (overlay wins over normal).
    """
    create_basic_wheel_for_package(script, 'foo', '2.0')
    create_basic_wheel_for_package(script, 'bar', '1.0')
    create_basic_wheel_for_package(script, 'bar', '3.0')
    create_basic_wheel_for_package(script, 'other', '0.5')
    script.pip_install_local('-f', script.scratch_path, 'foo', 'bar', 'other')
    run_with_build_env(
        script,
        '''
        r = build_env.check_requirements(['foo', 'bar', 'other'])
        assert r == (set(), {'foo', 'bar', 'other'}), repr(r)
        r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
        assert r == (set(), {'foo>1.0', 'bar==3.0'}), repr(r)
        r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
        assert r == (set(), {'foo>3.0', 'bar>=2.5'}), repr(r)
        ''')
    run_with_build_env(
        script,
        '''
        build_env.install_requirements(finder, ['foo', 'bar==3.0'], 'normal',
                                       'installing foo in normal')
        r = build_env.check_requirements(['foo', 'bar', 'other'])
        assert r == (set(), {'other'}), repr(r)
        r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
        assert r == (set(), set()), repr(r)
        r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
        assert r == ({('foo==2.0', 'foo>3.0')}, set()), repr(r)
        ''')
    run_with_build_env(
        script,
        '''
        build_env.install_requirements(finder, ['foo', 'bar==3.0'], 'normal',
                                       'installing foo in normal')
        build_env.install_requirements(finder, ['bar==1.0'], 'overlay',
                                       'installing foo in overlay')
        r = build_env.check_requirements(['foo', 'bar', 'other'])
        assert r == (set(), {'other'}), repr(r)
        r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
        assert r == ({('bar==1.0', 'bar==3.0')}, set()), repr(r)
        r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
        assert r == ({('bar==1.0', 'bar>=2.5'), ('foo==2.0', 'foo>3.0')}, \
        set()), repr(r)
        ''')
def test_build_env_overlay_prefix_has_priority(script):
    """When the same package is installed into both prefixes, imports
    inside the build environment must resolve to the overlay copy."""
    create_basic_wheel_for_package(script, 'pkg', '2.0')
    create_basic_wheel_for_package(script, 'pkg', '4.3')
    result = run_with_build_env(
        script,
        '''
        build_env.install_requirements(finder, ['pkg==2.0'], 'overlay',
                                       'installing pkg==2.0 in overlay')
        build_env.install_requirements(finder, ['pkg==4.3'], 'normal',
                                       'installing pkg==4.3 in normal')
        ''',
        '''
        print(__import__('pkg').__version__)
        ''')
    assert result.stdout.strip() == '2.0', str(result)
@pytest.mark.incompatible_with_test_venv
def test_build_env_isolation(script):
    """A build environment must hide packages installed in site-packages,
    the user site, .pth-referenced directories and PYTHONPATH; the
    generated script exits non-zero if `pkg` is importable."""
    # Create dummy `pkg` wheel.
    pkg_whl = create_basic_wheel_for_package(script, 'pkg', '1.0')
    # Install it to site packages.
    script.pip_install_local(pkg_whl)
    # And a copy in the user site.
    script.pip_install_local('--ignore-installed', '--user', pkg_whl)
    # And to another directory available through a .pth file.
    target = script.scratch_path / 'pth_install'
    script.pip_install_local('-t', target, pkg_whl)
    (script.site_packages_path / 'build_requires.pth').write_text(
        str(target) + '\n'
    )
    # And finally to yet another directory available through PYTHONPATH.
    target = script.scratch_path / 'pypath_install'
    script.pip_install_local('-t', target, pkg_whl)
    script.environ["PYTHONPATH"] = target
    run_with_build_env(
        script, '',
        r'''
        from distutils.sysconfig import get_python_lib
        import sys
        try:
            import pkg
        except ImportError:
            pass
        else:
            print(
                f'imported `pkg` from `{pkg.__file__}`',
                file=sys.stderr)
            print('system sites:\n ' + '\n '.join(sorted({
                get_python_lib(plat_specific=0),
                get_python_lib(plat_specific=1),
            })), file=sys.stderr)
            print('sys.path:\n ' + '\n '.join(sys.path), file=sys.stderr)
            sys.exit(1)
        ''')
| mit |
yawnosnorous/python-for-android | python3-alpha/python3-src/Lib/plat-os2emx/_emx_link.py | 57 | 2499 | # _emx_link.py
# Written by Andrew I MacIntyre, December 2002.
"""_emx_link.py is a simplistic emulation of the Unix link(2) library routine
for creating so-called hard links. It is intended to be imported into
the os module in place of the unimplemented (on OS/2) Posix link()
function (os.link()).
We do this on OS/2 by implementing a file copy, with link(2) semantics:-
- the target cannot already exist;
- we hope that the actual file open (if successful) is actually
atomic...
Limitations of this approach/implementation include:-
- no support for correct link counts (EMX stat(target).st_nlink
is always 1);
- thread safety undefined;
- default file permissions (r+w) used, can't be over-ridden;
- implemented in Python so comparatively slow, especially for large
source files;
- need sufficient free disk space to store the copy.
Behaviour:-
- any exception should propagate to the caller;
- want target to be an exact copy of the source, so use binary mode;
- returns None, same as os.link() which is implemented in posixmodule.c;
- target removed in the event of a failure where possible;
- given the motivation to write this emulation came from trying to
support a Unix resource lock implementation, where minimal overhead
during creation of the target is desirable and the files are small,
we read a source block before attempting to create the target so that
we're ready to immediately write some data into it.
"""
import os
import errno
__all__ = ['link']
def link(source, target):
    """link(source, target) -> None

    Attempt to hard link the source file to the target file name.
    On OS/2, this creates a complete copy of the source file.
    """
    s = os.open(source, os.O_RDONLY | os.O_BINARY)
    if os.isatty(s):
        # A terminal can't be copied; report it the way link(2) reports
        # an impossible link (cross-device).
        raise OSError(errno.EXDEV, 'Cross-device link')
    # Read the first block before creating the target, so data is ready
    # to write immediately after the (hopefully atomic) exclusive create.
    data = os.read(s, 1024)
    try:
        t = os.open(target, os.O_WRONLY | os.O_BINARY | os.O_CREAT | os.O_EXCL)
    except OSError:
        os.close(s)
        raise
    try:
        while data:
            os.write(t, data)
            data = os.read(s, 1024)
    except OSError:
        # Remove the partial target: link(2) semantics mean the target
        # either exists completely or not at all.
        os.close(s)
        os.close(t)
        os.unlink(target)
        raise
    os.close(s)
    os.close(t)
if __name__ == '__main__':
    # Minimal command-line front end: emx_link <source> <target>
    import sys
    try:
        link(sys.argv[1], sys.argv[2])
    except IndexError:
        print('Usage: emx_link <source> <target>')
    except OSError:
        print('emx_link: %s' % str(sys.exc_info()[1]))
| apache-2.0 |
HBehrens/feedsanitizer | django/core/management/commands/startapp.py | 321 | 1909 | import os
from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
class Command(LabelCommand):
    help = "Creates a Django app directory structure for the given app name in the current directory."
    args = "[appname]"
    label = 'application name'
    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False
    def handle_label(self, app_name, directory=None, **options):
        """Create the app skeleton for ``app_name`` under ``directory``
        (defaults to the current working directory).

        Raises CommandError when the app name collides with the project
        name or with an importable Python module.
        """
        if directory is None:
            directory = os.getcwd()
        # Determine the project_name by using the basename of directory,
        # which should be the full path of the project directory (or the
        # current directory if no directory was passed).
        project_name = os.path.basename(directory)
        if app_name == project_name:
            raise CommandError("You cannot create an app with the same name"
                               " (%r) as your project." % app_name)
        # Check that the app_name cannot be imported.
        try:
            import_module(app_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as an app name. Please try another name." % app_name)
        copy_helper(self.style, 'app', app_name, directory, project_name)
class ProjectCommand(Command):
    help = ("Creates a Django app directory structure for the given app name"
            " in this project's directory.")
    def __init__(self, project_directory):
        # Remember the project directory so handle_label can pin new
        # apps to it rather than the current working directory.
        super(ProjectCommand, self).__init__()
        self.project_directory = project_directory
    def handle_label(self, app_name, **options):
        """Delegate to Command.handle_label with the fixed project dir."""
        super(ProjectCommand, self).handle_label(app_name, self.project_directory, **options)
| mit |
JaDogg/__py_playground | reference/parsley/doc/calc.py | 4 | 1088 | import math
from parsley import makeGrammar
def calculate(start, pairs):
    """Fold a sequence of ``(operator, operand)`` pairs into one value,
    starting from ``start``.

    Supported operators: '+', '-', '*', '/'.  Pairs with any other
    operator are silently ignored.
    """
    ops = {
        '+': lambda acc, v: acc + v,
        '-': lambda acc, v: acc - v,
        '*': lambda acc, v: acc * v,
        '/': lambda acc, v: acc / v,
    }
    acc = start
    for op, operand in pairs:
        apply_op = ops.get(op)
        if apply_op is not None:
            acc = apply_op(acc, operand)
    return acc
# Parsley grammar for a four-function calculator with the usual
# precedence: expr folds +/- over expr2, which folds */ over values.
calcGrammar = """
number = <digit+>:ds -> int(ds)
parens = '(' ws expr:e ws ')' -> e
value = number | parens
ws = ' '*
add = '+' ws expr2:n -> ('+', n)
sub = '-' ws expr2:n -> ('-', n)
mul = '*' ws value:n -> ('*', n)
div = '/' ws value:n -> ('/', n)
addsub = ws (add | sub)
muldiv = ws (mul | div)
expr = expr2:left addsub*:right -> calculate(left, right)
expr2 = value:left muldiv*:right -> calculate(left, right)
"""
Calc = makeGrammar(calcGrammar, {"calculate": calculate}, name="Calc")
# Extension grammar: adds the constants pi and e on top of Calc.
calcGrammarEx = """
value = super | constant
constant = 'pi' -> math.pi
         | 'e' -> math.e
"""
CalcEx = makeGrammar(calcGrammarEx, {"math": math}, name="CalcEx",
                     extends=Calc)
klenks/jobsportal | venv/lib/python2.7/site-packages/django/contrib/gis/geos/linestring.py | 136 | 6019 | from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry, LinearGeometryMixin
from django.contrib.gis.geos.point import Point
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class LineString(LinearGeometryMixin, GEOSGeometry):
    """A GEOS-backed LineString geometry: an ordered sequence of points
    accessed through an underlying coordinate sequence."""
    # GEOS C-API constructor used by __init__ and _set_list.
    _init_func = capi.create_linestring
    # A line string needs at least two points.
    _minlength = 2
    # This geometry type is backed by a coordinate sequence.
    has_cs = True
    def __init__(self, *args, **kwargs):
        """
        Initializes on the given sequence -- may take lists, tuples, NumPy arrays
        of X,Y pairs, or Point objects. If Point objects are used, ownership is
        _not_ transferred to the LineString object.
        Examples:
            ls = LineString((1, 1), (2, 2))
            ls = LineString([(1, 1), (2, 2)])
            ls = LineString(array([(1, 1), (2, 2)]))
            ls = LineString(Point(1, 1), Point(2, 2))
        """
        # If only one argument provided, set the coords array appropriately
        if len(args) == 1:
            coords = args[0]
        else:
            coords = args
        if not (isinstance(coords, (tuple, list)) or numpy and isinstance(coords, numpy.ndarray)):
            raise TypeError('Invalid initialization input for LineStrings.')
        # If SRID was passed in with the keyword arguments
        srid = kwargs.get('srid')
        ncoords = len(coords)
        if not ncoords:
            # Empty geometry: construct with a NULL coordinate sequence.
            super(LineString, self).__init__(self._init_func(None), srid=srid)
            return
        if ncoords < self._minlength:
            raise ValueError(
                '%s requires at least %d points, got %s.' % (
                    self.__class__.__name__,
                    self._minlength,
                    ncoords,
                )
            )
        if isinstance(coords, (tuple, list)):
            # Getting the number of coords and the number of dimensions -- which
            # must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
            ndim = None
            # Incrementing through each of the coordinates and verifying
            for coord in coords:
                if not isinstance(coord, (tuple, list, Point)):
                    raise TypeError('Each coordinate should be a sequence (list or tuple)')
                if ndim is None:
                    ndim = len(coord)
                    self._checkdim(ndim)
                elif len(coord) != ndim:
                    raise TypeError('Dimension mismatch.')
            numpy_coords = False
        else:
            shape = coords.shape # Using numpy's shape.
            if len(shape) != 2:
                raise TypeError('Too many dimensions.')
            self._checkdim(shape[1])
            ndim = shape[1]
            numpy_coords = True
        # Creating a coordinate sequence object because it is easier to
        # set the points using GEOSCoordSeq.__setitem__().
        cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
        for i in range(ncoords):
            if numpy_coords:
                cs[i] = coords[i, :]
            elif isinstance(coords[i], Point):
                cs[i] = coords[i].tuple
            else:
                cs[i] = coords[i]
        # Calling the base geometry initialization with the returned pointer
        # from the function.
        super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
    def __iter__(self):
        "Allows iteration over this LineString."
        for i in range(len(self)):
            yield self[i]
    def __len__(self):
        "Returns the number of points in this LineString."
        return len(self._cs)
    def _get_single_external(self, index):
        # Delegate point access to the coordinate sequence.
        return self._cs[index]
    _get_single_internal = _get_single_external
    def _set_list(self, length, items):
        # Rebuild the whole coordinate sequence (used by slice assignment).
        ndim = self._cs.dims
        hasz = self._cs.hasz # I don't understand why these are different
        # create a new coordinate sequence and populate accordingly
        cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
        for i, c in enumerate(items):
            cs[i] = c
        ptr = self._init_func(cs.ptr)
        if ptr:
            # Replace the underlying GEOS geometry, freeing the old one.
            capi.destroy_geom(self.ptr)
            self.ptr = ptr
            self._post_init(self.srid)
        else:
            # can this happen?
            raise GEOSException('Geometry resulting from slice deletion was invalid.')
    def _set_single(self, index, value):
        self._checkindex(index)
        self._cs[index] = value
    def _checkdim(self, dim):
        # Only 2D and 3D coordinates are supported.
        if dim not in (2, 3):
            raise TypeError('Dimension mismatch.')
    # #### Sequence Properties ####
    @property
    def tuple(self):
        "Returns a tuple version of the geometry from the coordinate sequence."
        return self._cs.tuple
    coords = tuple
    def _listarr(self, func):
        """
        Internal routine that returns a sequence (list) corresponding with
        the given function.  Will return a numpy array if possible.
        """
        lst = [func(i) for i in range(len(self))]
        if numpy:
            return numpy.array(lst) # ARRRR!
        else:
            return lst
    @property
    def array(self):
        "Returns a numpy array for the LineString."
        return self._listarr(self._cs.__getitem__)
    @property
    def x(self):
        "Returns a list or numpy array of the X variable."
        return self._listarr(self._cs.getX)
    @property
    def y(self):
        "Returns a list or numpy array of the Y variable."
        return self._listarr(self._cs.getY)
    @property
    def z(self):
        "Returns a list or numpy array of the Z variable."
        if not self.hasz:
            return None
        else:
            return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
    """A closed LineString, used e.g. as a Polygon boundary."""
    # A closed ring needs at least four points (first == last).
    _minlength = 4
    _init_func = capi.create_linearring
| mit |
dhenrygithub/QGIS | python/ext-libs/markupsafe/__init__.py | 701 | 10338 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import string
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
# Matches HTML/XML comments and tags.
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
# Matches one entity reference: '&', the entity body, ';'.
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped. This implements the `__html__` interface a couple
    of frameworks and web applications use. :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.
    The `escape` function returns markup objects so that double escaping can't
    happen.
    The constructor of the :class:`Markup` class can be used for three
    different things: When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:
    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...     def __html__(self):
    ...         return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')
    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:
    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')
    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:
    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    __slots__ = ()
    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # Objects exposing __html__ are trusted to yield already-safe markup.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)
    def __html__(self):
        # A Markup is its own HTML representation (already safe).
        return self
    def __add__(self, other):
        # Concatenation escapes the other operand first.
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented
    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented
    def __mul__(self, num):
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__
    def __mod__(self, arg):
        # %-interpolation: wrap every argument so it is escaped lazily
        # when the format operation stringifies it.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )
    def join(self, seq):
        # Each joined element is escaped before joining.
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__
    def split(self, *args, **kwargs):
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__
    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__
    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(
            self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__
    def unescape(self):
        r"""Unescape markup again into an text_type string. This also resolves
        known HTML4 and XHTML entities:
        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            # Resolve named, hex (&#x..;) and decimal (&#..;) entities;
            # anything unrecognized is dropped.
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            return u''
        return _entity_re.sub(handle_match, text_type(self))
    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags. This
        also resolves known HTML4 and XHTML entities. Whitespace is
        normalized to one:
        >>> Markup("Main &raquo; <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()
    @classmethod
    def escape(cls, s):
        """Escape the string. Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv
    # Factory producing wrappers that escape all string arguments before
    # delegating to the underlying text_type method.
    def make_simple_escaping_wrapper(name):
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            _escape_argspec(kwargs, iteritems(kwargs), self.escape)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func
    # Install escaping wrappers for the listed text methods directly into
    # the class namespace (this runs at class-definition time).
    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_simple_escaping_wrapper(method)
    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))
    # new in python 2.6
    if hasattr(text_type, 'format'):
        def format(*args, **kwargs):
            self, args = args[0], args[1:]
            formatter = EscapeFormatter(self.escape)
            kwargs = _MagicFormatMapping(args, kwargs)
            return self.__class__(formatter.vformat(self, args, kwargs))
        def __html_format__(self, format_spec):
            # Markup itself accepts no format spec — it is already safe.
            if format_spec:
                raise ValueError('Unsupported format specification '
                                 'for Markup.')
            return self
    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_simple_escaping_wrapper('__getslice__')
    # Remove class-body helpers so they do not leak as attributes.
    del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
    """Mapping wrapper that serves positional arguments for auto-numbered
    ``{}`` fields and falls back to keyword lookup for named keys.

    See http://bugs.python.org/issue13598 for why this workaround is
    needed in the standard library's string formatting.
    """
    def __init__(self, args, kwargs):
        self._args = args
        self._kwargs = kwargs
        self._last_index = 0
    def __getitem__(self, key):
        if key != '':
            return self._kwargs[key]
        # '' means "next auto-numbered positional field".
        idx = self._last_index
        self._last_index = idx + 1
        try:
            return self._args[idx]
        except LookupError:
            return self._kwargs[str(idx)]
    def __iter__(self):
        return iter(self._kwargs)
    def __len__(self):
        return len(self._kwargs)
if hasattr(text_type, 'format'):
    class EscapeFormatter(string.Formatter):
        """string.Formatter that escapes every substituted field value."""
        def __init__(self, escape):
            self.escape = escape
        def format_field(self, value, format_spec):
            # Prefer the rich __html_format__ hook, then plain __html__,
            # and finally the stock formatting machinery.
            if hasattr(value, '__html_format__'):
                rendered = value.__html_format__(format_spec)
            elif hasattr(value, '__html__'):
                if format_spec:
                    raise ValueError('No format specification allowed '
                                     'when formatting an object with '
                                     'its __html__ method.')
                rendered = value.__html__()
            else:
                rendered = string.Formatter.format_field(self, value, format_spec)
            return text_type(self.escape(rendered))
def _escape_argspec(obj, iterable, escape):
    """Escape, in place on *obj*, every value of *iterable* that is a
    string or exposes ``__html__``; returns *obj*."""
    for key, value in iterable:
        if isinstance(value, string_types) or hasattr(value, '__html__'):
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Helper for Markup.__mod__: defers escaping until the wrapped value
    is actually stringified by the % operator."""
    def __init__(self, obj, escape):
        self.obj = obj
        self.escape = escape
    def __getitem__(self, item):
        return _MarkupEscapeHelper(self.obj[item], self.escape)
    def __str__(self):
        return text_type(self.escape(self.obj))
    __unicode__ = __str__
    def __repr__(self):
        return str(self.escape(repr(self.obj)))
    def __int__(self):
        return int(self.obj)
    def __float__(self):
        return float(self.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
# On Python 3 expose ``soft_str`` as an alias, mirroring the str/unicode rename.
if not PY2:
    soft_str = soft_unicode
    __all__.append('soft_str')
| gpl-2.0 |
duqiao/django | django/views/static.py | 190 | 5142 | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
    """
    Serve static files below a given point in the directory structure.
    To use, put a URL pattern such as::
        from django.views.static import serve
        url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
    in your URLconf. You must provide the ``document_root`` param. You may
    also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
    of the directory. This index view will use the template hardcoded below,
    but if you'd like to override it, you can create a template called
    ``static/directory_index.html``.
    """
    path = posixpath.normpath(unquote(path))
    path = path.lstrip('/')
    newpath = ''
    # Rebuild the path one component at a time, dropping anything that could
    # escape document_root ('.', '..', empty components, drive letters).
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')
    if newpath and path != newpath:
        # Sanitization changed the path: redirect to the canonical URL.
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404(_("Directory indexes are not allowed here."))
    if not os.path.exists(fullpath):
        raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    content_type, encoding = mimetypes.guess_type(fullpath)
    content_type = content_type or 'application/octet-stream'
    # FileResponse takes ownership of the handle and closes it when consumed.
    response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if stat.S_ISREG(statobj.st_mode):
        response["Content-Length"] = statobj.st_size
    if encoding:
        response["Content-Encoding"] = encoding
    return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
    """Render an HTML listing of the directory *fullpath* (URL path *path*)."""
    try:
        template = loader.select_template([
            'static/directory_index.html',
            'static/directory_index',
        ])
    except TemplateDoesNotExist:
        # No user override: fall back to the built-in template.
        template = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
    entries = []
    for name in os.listdir(fullpath):
        if name.startswith('.'):
            continue  # hide dotfiles
        if os.path.isdir(os.path.join(fullpath, name)):
            name += '/'
        entries.append(name)
    context = Context({
        'directory': path + '/',
        'file_list': entries,
    })
    return HttpResponse(template.render(context))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
| bsd-3-clause |
MG-RAST/DRISEE | drisee.py | 2 | 25594 | #!/usr/bin/env python
import os, sys, re, time, datetime, hashlib, shutil
import subprocess
import cStringIO
import string, random
from Bio import SeqIO
from random import randrange
from optparse import OptionParser
from multiprocessing import Pool
__doc__ = """
Script to calculate sequence error.
Input: fasta/fastq file to get error on
Output: error matrix file
STDOUT: Runtime summary stats"""
# Module-level settings shared by the worker functions; populated from the
# command-line options inside main().
LOG_FILE = ''
ITER_MAX = 0
CONV_MIN = 0
PREF_LEN = 0
def ip(a, b, d):
    """Score one alignment offset of *a* (shorter) against *b* (longer).

    Offset *d* aligns a[i] with b[i+d]. Returns (matches, overlap_length),
    where matches counts equal, non-'N' bases inside the overlap. The
    overlap length is reported as 1 when the windows do not overlap, so
    callers can divide by it safely.

    Bug fix: the original looped from i = 0 even for negative offsets, so
    b[i+d] used Python's negative indexing and compared against the END of
    *b*. The loop now covers only the valid overlap window.
    """
    assert len(b) >= len(a)
    start = max(0, -d)                 # first index of a inside the overlap
    end = min(len(a), len(b) - d)      # one past the last overlapping index
    r = end - start                    # same overlap length as before
    c = 0
    for i in range(start, end):
        if a[i] == b[i + d] and a[i] != "N":
            c += 1
    if r <= 0:
        r = 1
    return c, r
def align(sequence, adapter):
    """Slide the shorter of the two strings across the longer and return the
    best alignment as (matches, overlap_length, offset), accepting only
    alignments meeting the module MINALIGNID / MINOVERLAP thresholds."""
    if len(sequence) > len(adapter):
        short, long_ = adapter, sequence
    else:
        short, long_ = sequence, adapter
    assert len(long_) >= len(short)
    best_m = 0
    best_r = 0
    best_o = 0
    for offset in range(-len(short), len(long_)):
        m, r = ip(short, long_, offset)
        identity = float(m) / float(r)
        if m > best_m and identity >= MINALIGNID and r >= MINOVERLAP:
            best_m, best_r, best_o = m, r, offset
    return (best_m, best_r, best_o)
def bestalign(sequence, adapters):
    """Align *sequence* against every adapter in the dict *adapters* and
    return (best_match_count, best_adapter_key); (0, 0) when nothing aligns.

    Fix: removed the two no-op ``type(...)`` expression statements from the
    original, which had no effect.
    """
    best_score = 0
    best_key = 0
    for key in adapters.keys():
        m, r, o = align(sequence, adapters[key])
        if m > best_score:
            best_score = m
            best_key = key
    return (best_score, best_key)
def write_file(text, fname, append=None):
    """Write *text* to *fname*, appending when *append* is truthy.

    Fix: uses a context manager so the handle is closed even if the write
    raises (the original leaked the handle on error).
    """
    mode = 'a' if append else 'w'
    with open(fname, mode) as outhdl:
        outhdl.write(text)
def run_cmd(cmd, in_pipe=None, output=None):
    """Run *cmd*, optionally fed from the stdout of *in_pipe* (another argv
    list) and optionally streaming stdout into the file handle *output*.
    Returns (stdout, stderr); raises IOError on a non-zero exit status."""
    if not output:
        output = subprocess.PIPE
    if in_pipe:
        feeder = subprocess.Popen(in_pipe, stdout=subprocess.PIPE)
        proc = subprocess.Popen(cmd, stdin=feeder.stdout,
                                stdout=output, stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen(cmd, stdout=output, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise IOError("%s\n%s" % (" ".join(cmd), stderr))
    return stdout, stderr
def random_truncate(items, cutoff):
    """Shuffle *items* in place (Fisher-Yates) and return the first *cutoff*
    elements; the list is returned untouched when cutoff < 1, the list has
    fewer than two elements, or it is already within the cutoff."""
    if cutoff < 1 or len(items) < 2 or len(items) <= cutoff:
        return items
    for i in range(len(items) - 1, 0, -1):
        j = randrange(i)  # 0 <= j <= i-1, same bound as the original
        items[i], items[j] = items[j], items[i]
    return items[:cutoff]
def random_str(size=6):
    """Return a random alphanumeric identifier of length *size*."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
def seq_stats(in_file, fformat, verb):
    """Run seq_length_stats.py on *in_file* (of type *fformat*) and return
    its tab-separated key/value output as a dict; output is also mirrored to
    LOG_FILE and, when *verb*, to stdout."""
    fout, ferr = run_cmd(['seq_length_stats.py', '-f', '-i', in_file, '-t', fformat])
    combined = "\n".join([fout, ferr])
    if LOG_FILE and (fout or ferr):
        write_file(combined, LOG_FILE, 1)
    if verb and (fout or ferr):
        sys.stdout.write(combined)
    stats = {}
    for line in fout.strip().split('\n'):
        key, val = line.split('\t')
        stats[key] = val
    return stats
def filter_seqs(in_file, out_file, stats, seqper, ambig_max, stdev_multi, filter_min, fformat):
    """Randomly subsample *in_file* at rate *seqper* and keep only reads
    that pass the length and ambiguity filters; passing reads are written as
    fasta to *out_file* under fresh random ids. Returns the number kept."""
    # Length window: mean +/- stdev_multi standard deviations, floored
    # just above filter_min, with max forced above min.
    avg_len = float(stats['average_length'])
    sdv_len = float(stats['standard_deviation_length'])
    min_len = int(avg_len - (sdv_len * stdev_multi))
    max_len = int(avg_len + (sdv_len * stdev_multi))
    if min_len < filter_min:
        min_len = filter_min + 1
    if max_len < min_len:
        max_len = min_len + 1
    kept = 0
    output_hdl = open(out_file, 'w')
    input_hdl = open(in_file, 'rU')
    try:
        for rec in SeqIO.parse(input_hdl, fformat):
            if seqper < random.random():
                continue  # random subsample
            slen = len(rec.seq)
            if slen < min_len or slen > max_len:
                continue
            ambig = sum(1 for ch in rec.seq if ch == 'n' or ch == 'N')
            if ambig > ambig_max:
                continue
            kept += 1
            output_hdl.write(">%s\n%s\n" % (random_str(), rec.seq))
    finally:
        input_hdl.close()
        output_hdl.close()
    return kept
def get_contaminated_md5_prefixes_from_fasta(in_file, prefix_len, adapters):
    """Return {md5_of_prefix: 1} for every read in *in_file* whose sequence
    aligns to at least one adapter in *adapters*."""
    contaminated = {}
    handle = open(in_file, 'rU')
    try:
        for rec in SeqIO.parse(handle, 'fasta'):
            prefix = str(rec.seq).upper()[:prefix_len]
            digest = hashlib.md5(prefix).hexdigest()
            score, _name = bestalign(rec.seq, adapters)
            if score > 0:
                contaminated[digest] = 1
    finally:
        handle.close()
    return contaminated
def bin_replicate_seqs(in_file, out_file, tmp_dir, prefix_len, nodes):
    """Write one (md5-of-uppercased-prefix, seq_id) line per read of
    *in_file*, sorted by md5, to *out_file*; reads sharing a prefix thus
    form contiguous replicate bins."""
    tmp_file = os.path.join(tmp_dir, os.path.basename(out_file) + '.tmp')
    tmp_hdl = open(tmp_file, 'w')
    input_hdl = open(in_file, 'rU')
    try:
        for rec in SeqIO.parse(input_hdl, 'fasta'):
            prefix = str(rec.seq).upper()[:prefix_len]
            tmp_hdl.write("%s\t%s\n" % (hashlib.md5(prefix).hexdigest(), rec.id))
    finally:
        input_hdl.close()
        tmp_hdl.close()
    # Give sort ~2GB of buffer per node before it spills to tmp_dir.
    smem = str(nodes * 2 * 1024) + 'M'
    run_cmd(['sort', '-T', tmp_dir, '-S', smem, '-t', "\t", '-k', '1,1', '-o', out_file, tmp_file])
    os.remove(tmp_file)
def filter_bins(fname, bin_min, total_max):
    """Count reads per bin in the sorted replicate file *fname* and return
    up to *total_max* randomly chosen bins having at least *bin_min* reads,
    as a {bin_id: ''} dict."""
    counts_file = fname + '.sum'
    out_hdl = open(counts_file, 'w')
    run_cmd(['uniq', '-c'], ['cut', '-f1', fname], out_hdl)
    out_hdl.close()
    eligible = {}
    in_hdl = open(counts_file, 'rU')
    try:
        for line in in_hdl:
            num, bin_id = line.strip().split()
            if int(num) >= bin_min:
                eligible[bin_id] = num
    finally:
        in_hdl.close()
    chosen = random_truncate(list(eligible.keys()), total_max)
    return dict((b, '') for b in chosen)
def get_sub_fasta(ids, index_seq, seq_file, sub_fasta):
    """Pull the records named in *ids* from the cdb-indexed *seq_file* and
    write them to *sub_fasta*, every record truncated to the length of the
    shortest one."""
    echo_cmd = ['echo'] + ids
    stdo, _x = run_cmd(['cdbyank', index_seq, '-d', seq_file], echo_cmd)
    # First pass: find the shortest record.
    lengths = []
    buf = cStringIO.StringIO(stdo)
    for rec in SeqIO.parse(buf, 'fasta'):
        lengths.append(len(rec.seq))
    buf.close()
    min_seq = min(lengths)
    # Second pass: emit every record clipped to that length.
    out_hdl = open(sub_fasta, 'w')
    buf = cStringIO.StringIO(stdo)
    for rec in SeqIO.parse(buf, 'fasta'):
        out_hdl.write(">%s\n%s\n" % (rec.id, rec.seq[:min_seq]))
    buf.close()
    out_hdl.close()
def process_bin(bin_id):
    """Run the steiner-string aligner on one bin's fasta file inside TMP_DIR,
    logging its output and cleaning up the bin's work files; returns *bin_id*."""
    bin_path = os.path.join(TMP_DIR, bin_id)
    os.mkdir(bin_path)
    cmd = ['run_find_steiner.pl',
           '-i', bin_path + '.fasta',
           '-o', bin_path + '.score',
           '-l', bin_path + '.log',
           '-t', bin_path,
           '--max_iter', str(ITER_MAX),
           '--min_conv', str(CONV_MIN)]
    sto, ste = run_cmd(cmd)
    if LOG_FILE and (sto or ste):
        write_file("\n".join([sto, ste]), LOG_FILE, 1)
    os.remove(bin_path + '.fasta')
    shutil.rmtree(bin_path)
    return bin_id
def process_data(data, match, error):
    """Fold one bin's score table into the running match/error tallies.

    *data* is the score file split on newlines: a header of six base labels
    followed by one row of six counts per read position. The header row is
    consumed from *data*; returns (bases, match, error). Per position, the
    count(s) equal to the column maximum agree with the consensus (match);
    smaller counts are disagreements (error).
    """
    header = data.pop(0)
    bps = header.split("\t")
    for pos, row in enumerate(data):
        if not row:
            continue
        counts = [int(x) for x in row.split("\t") if x != '']
        if len(counts) != 6:
            continue
        # Grow the per-position tallies on demand.
        if len(match) <= pos:
            match.insert(pos, dict((b, 0) for b in bps))
        if len(error) <= pos:
            error.insert(pos, dict((b, 0) for b in bps))
        consensus = max(counts)
        for col, cnt in enumerate(counts):
            if cnt == consensus:
                match[pos][bps[col]] += cnt
            else:
                error[pos][bps[col]] += cnt
    return bps, match, error
def create_output(bps, match, error, per):
    """Render the merged match/error tallies as the DRISEE table.

    Positions beyond the first PREF_LEN bases contribute to the overall
    per-base and total error rates reported in the two header lines.
    Returns (total_error_percent, table_text); *per* switches the body rows
    from raw counts to per-row percentages.
    """
    total = 0
    errs = dict((b, 0) for b in bps)
    lines = ["#\t" + "\t".join(bps) + "\t" + "\t".join(bps)]
    for pos in range(len(match)):
        row = []
        rsum = 0
        past_prefix = pos > (PREF_LEN - 1)
        for b in bps:
            cnt = match[pos][b]
            row.append(cnt)
            rsum += cnt
            if past_prefix:
                total += cnt
        for b in bps:
            cnt = error[pos][b]
            row.append(cnt)
            rsum += cnt
            if past_prefix:
                total += cnt
                errs[b] += cnt
        if per:
            cells = ["%f" % (((v * 1.0) / rsum) * 100) for v in row]
        else:
            cells = [str(v) for v in row]
        lines.append("%d\t" % (pos + 1) + "\t".join(cells))
    err_head = ["%s_err" % b for b in bps] + ['bp_err']
    err_nums = ["%f" % (((errs[b] * 1.0) / total) * 100) for b in bps]
    err_sum = ((sum(errs.values()) * 1.0) / total) * 100
    err_nums.append("%f" % err_sum)
    lines.insert(0, "#\t" + "\t".join(err_nums))
    lines.insert(0, "#\t" + "\t".join(err_head))
    return err_sum, "\n".join(lines)
# OptionParser metadata; the module __doc__ (above) supplies the long description.
usage = "usage: %prog [options] input_seq_file output_stat_file\n" + __doc__
version = "%prog 1.5"
def main(args):
global TMP_DIR, LOG_FILE, ITER_MAX, CONV_MIN, PREF_LEN, MINALIGNID, MINOVERLAP
parser = OptionParser(usage=usage, version=version)
parser.add_option("-p", "--processes", dest="processes", type="int", default=8, help="Number of processes to use [default '8']")
#parser.add_option("-t", "--seq_type", dest="seq_type", default='fasta', help="Sequence type: fasta, fastq [default 'fasta']")
parser.add_option("-t", "--seq_type", dest="seq_type", default='fasta', help="Sequence type: fasta, fastq [default 'fasta']")
parser.add_option("-f", "--filter_seq", dest="filter", action="store_true", default=False, help="Run sequence filtering, length and ambig bp [default off]")
parser.add_option("-r", "--replicate_file", dest="rep_file", default=None, help="File with sorted replicate bins (bin_id, seq_id) [default to calculate replicates]")
parser.add_option("-d", "--tmp_dir", dest="tmpdir", default="/tmp", help="DIR for intermediate files (must be full path). Specified directory must already exist. Files are automatically deleted at analysis completion. [default '/tmp']")
parser.add_option("-l", "--log_file", dest="logfile", default=None, help="Detailed processing related stats [default '/dev/null']")
parser.add_option("", "--percent", dest="percent", action="store_true", default=True, help="Additional output (output_stat_file.per) with percent values [default on]")
parser.add_option("", "--prefix_length", dest="prefix", type="int", default=50, help="Prefix length for replicate bins [default 50]")
parser.add_option("-s", "--seq_max", dest="seq_max", type="int", default=1000000, help="Maximum number of reads to process (chosen randomly) [default 1000000]")
parser.add_option("-a", "--ambig_bp_max", dest="ambig_max", type="int", default=0, help="Maximum number of allowed ambiguity characters before rejection [default 0]")
parser.add_option("-m", "--stdev_multiplier", dest="stdev_multi", type="float", default=2.0, help="Multiplier to stddev to get min and max seq lengths [default 2.0]")
parser.add_option("-n", "--bin_read_min", dest="read_min", type="int", default=20, help="Minimum number of reads in bin to be considered [default 20]")
parser.add_option("-x", "--bin_read_max", dest="read_max", type="int", default=1000, help="Maximum number of reads in bin to process (chosen randomly) [default 1000]")
parser.add_option("-b", "--bin_num_max", dest="num_max", type="int", default=1000, help="Maximum number of bins to process (chosen randomly) [default 1000]")
parser.add_option("-i", "--iter_max", dest="iter_max", type="int", default=10, help="Maximum number of iterations if alignment does not converge [default 10]")
parser.add_option("-c", "--converge_min", dest="conv_min", type="int", default=3, help="Minimum number of iterations to identify convergence [default 3]")
parser.add_option("-j", "--check_contam", dest="check_contam", action="store_true", default=True, help="Separate results for seqs with adapter contamination [default on]")
parser.add_option("-o", "--minoverlap", dest="MINOVERLAP", type="int", default=10, help="Minimum overlap paramter for identifying adapter contamination [default 10]")
parser.add_option("-e", "--fractionid", dest="MINALIGNID", type="float", default=0.9, help="Minimum alignment id for identifying adapter contamination [default 0.9]")
parser.add_option("-g", "--database", dest="database", default="/home/ubuntu/DRISEE/adapterDB.fna", help="Database fasta of adapter sequences [default /home/ubuntu/DRISEE/adapterDB.fna]")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Write runtime summary stats to STDOUT [default off]")
start_time = time.time()
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.error("Incorrect number of arguments")
# check inputs
(in_seq, out_stat) = args
if not (os.path.isfile(in_seq) and os.path.isdir(opts.tmpdir)):
#parser.error("Invalid input files and/or tmp dir")
error_string = ""
if not (os.path.isfile(in_seq)): error_string = ( "Invalid input file (" + in_seq + ")\t;" )
if not (os.path.isdir(opts.tmpdir)): error_string = ( error_string + "Invalid tmp dir (" + opts.tmpdir + ")\n" )
parser.error(error_string)
if opts.processes < 1: opts.processes = 1
if opts.ambig_max < 0: opts.ambig_max = 0
if opts.stdev_multi <=0: opts.stdev_multi = 2
if opts.read_min > opts.read_max:
parser.error("bin_read_min (%d) can not be greater than bin_read_max %d)"%(opts.read_min, opts.out_name))
if opts.read_min < 1: opts.read_min = 1
if opts.read_max < 2: opts.read_max = 2
if opts.num_max < 1: opts.num_max = 1
if opts.iter_max < 1: opts.iter_max = 1
if opts.conv_min < 1: opts.conv_min = 1
if opts.seq_max < 1: opts.seq_max = 2
if opts.prefix < 10: opts.prefix = 10
TMP_DIR = os.path.join(opts.tmpdir, random_str(8)+'.drisee')
LOG_FILE = opts.logfile
ITER_MAX = opts.iter_max
CONV_MIN = opts.conv_min
PREF_LEN = opts.prefix
MINOVERLAP = opts.MINOVERLAP
MINALIGNID = opts.MINALIGNID
DBFILE = opts.database
removeambigtable= string.maketrans('RYWSMKHBVDNrywsmkhbvdn','N'* 22)
adapters = {}
adapters[0] = ""
for seq_record in SeqIO.parse(DBFILE, "fasta"):
adapters[seq_record.description] = string.upper(str(seq_record.seq).translate(removeambigtable))
adapters["%s.R"%seq_record.description] = string.upper(str(seq_record.seq.reverse_complement()).translate(removeambigtable))
os.mkdir(TMP_DIR)
if opts.verbose: sys.stdout.write("Version:\t%s\n"%version)
# seq stats
stats = seq_stats(in_seq, opts.seq_type, opts.verbose)
seqnum = int(stats['sequence_count'])
seqper = float(opts.seq_max) / seqnum
seqmax = 0
if opts.verbose: sys.stdout.write("DRISEE will be run on %d of %d sequences\n"%(min(opts.seq_max, seqnum),seqnum))
# random subselect / length filter
if opts.filter:
if opts.verbose: sys.stdout.write("Filtering with max ambig %d and stddev range x%f ... " %(opts.ambig_max,opts.stdev_multi))
filter_file = os.path.join(TMP_DIR, os.path.basename(in_seq)+'.filter.fasta')
seqmax = filter_seqs(in_seq, filter_file, stats, seqper, opts.ambig_max, opts.stdev_multi, opts.prefix, opts.seq_type)
in_seq = filter_file
opts.seq_type = 'fasta'
if opts.verbose: sys.stdout.write("Done, %s sequences kept\n"%seqmax)
# random subselect / uniquify seqs
else:
if opts.verbose: sys.stdout.write("Making sure seq ids are unique ... ")
out_file = os.path.join(TMP_DIR, os.path.basename(in_seq)+'.uniq.fasta')
out_hdl = open(out_file, 'w')
in_hdl = open(in_seq, 'rU')
try:
for rec in SeqIO.parse(in_hdl, opts.seq_type):
rnd_num = random.random()
if seqper >= rnd_num:
seqmax += 1
out_hdl.write(">%s\n%s\n" %(random_str(), rec.seq))
finally:
in_hdl.close()
out_hdl.close()
in_seq = out_file
opts.seq_type = 'fasta'
if opts.verbose: sys.stdout.write("Done, %s sequences kept\n"%seqmax)
### seq file is always fasta from here on
# dereplication
if not (opts.rep_file and os.path.isfile(opts.rep_file)):
if opts.verbose: sys.stdout.write("Creating replicate bins, prefix size %d bps ... " %opts.prefix)
rep_file = os.path.join(TMP_DIR, os.path.basename(in_seq)+'.derep')
bin_replicate_seqs(in_seq, rep_file, TMP_DIR, opts.prefix, opts.processes)
opts.rep_file = rep_file
if opts.verbose: sys.stdout.write("Done\n")
# index fasta file
if opts.verbose: sys.stdout.write("Creating cdb index ... ")
index_seq = os.path.join(TMP_DIR, os.path.basename(in_seq)+'.cidx')
iout, ierr = run_cmd(['cdbfasta', in_seq, '-o', index_seq])
if opts.logfile and (iout or ierr):
write_file("\n".join([iout, ierr]), opts.logfile, 1)
if opts.verbose: sys.stdout.write("Done\n")
# filter bin set
if opts.verbose: sys.stdout.write("Getting %d random bins with >= %d reads ... " %(opts.num_max,opts.read_min))
bins = filter_bins(opts.rep_file, opts.read_min, opts.num_max)
size = len(bins)
if size == 0:
msg = "No available bins >= %d to process, quiting\n"%opts.read_min
if opts.logfile: write_file(msg, opts.logfile, 1)
if opts.verbose: sys.stdout.write(msg)
write_file('', out_stat)
shutil.rmtree(TMP_DIR)
return 0
if opts.verbose: sys.stdout.write("Done, %s bins found\n"%size)
# check for adapter contamination in one sequence for each md5 in filtered set
if opts.check_contam:
# bins_to_rep_ids will contain a dictionary of each md5 in filtered set to one sequence of the sequence ids
bins_to_rep_ids = {}
# bins_to_seq_count will contain a dictionary of each md5 in filtered set to a count of the number of seqs in that bin
bins_to_seq_count = {}
dhdl = open(opts.rep_file, 'rU')
try:
for line in dhdl:
(bid, sid) = line.split()
if bid in bins:
bins_to_rep_ids[bid] = sid
if bid in bins_to_seq_count:
bins_to_seq_count[bid] += 1
else:
bins_to_seq_count[bid] = 1
finally:
dhdl.close()
# generate sequence file for list of sequence ids
rep_seqs_fasta = os.path.join(TMP_DIR, "rep_seqs.fasta")
get_sub_fasta(bins_to_rep_ids.values(), index_seq, in_seq, rep_seqs_fasta)
contaminated_md5s = get_contaminated_md5_prefixes_from_fasta(rep_seqs_fasta, opts.prefix, adapters)
os.remove(rep_seqs_fasta)
# create trimmed bin fasta files
to_process = []
total_ids = 0
ids = []
curr = ''
dhdl = open(opts.rep_file, 'rU')
if opts.verbose: sys.stdout.write("Creating %d bin files with >= %d reads ..." %(size,opts.read_min))
try:
for line in dhdl:
(bid, sid) = line.split()
if not (bid and sid and (bid in bins)):
continue
if curr == '':
curr = bid
if bid != curr:
if len(ids) > opts.read_max:
ids = random_truncate(ids, opts.read_max)
bin_fasta = os.path.join(TMP_DIR, curr+".fasta")
get_sub_fasta(ids, index_seq, in_seq, bin_fasta) ## seqs truncated to min
if os.path.isfile(bin_fasta):
to_process.append(curr)
total_ids += len(ids)
curr = bid
ids = []
ids.append(sid)
if len(ids) > opts.read_max:
ids = random_truncate(ids, opts.read_max)
bin_fasta = os.path.join(TMP_DIR, curr+".fasta")
get_sub_fasta(ids, index_seq, in_seq, bin_fasta) ## seqs truncated to min
if os.path.isfile(bin_fasta):
to_process.append(curr)
total_ids += len(ids)
finally:
dhdl.close()
if opts.verbose: sys.stdout.write("Done\n")
# process bins
min_proc = 1
if opts.processes > min_proc:
min_proc = opts.processes
if len(to_process) < min_proc:
min_proc = len(to_process)
if opts.verbose: sys.stdout.write("Processing %d bins (%d sequences total) using %d threads ... "%(size,total_ids,min_proc))
pool = Pool(processes=min_proc)
finish = pool.map(process_bin, to_process, 1)
pool.close()
pool.join()
if opts.verbose: sys.stdout.write("Done\n")
# merge results
if len(finish) == 0:
msg = "No bins were processed, quiting\n"
if opts.logfile: write_file(msg, opts.logfile, 1)
if opts.verbose: sys.stdout.write(msg)
write_file('', out_stat)
return 0
bases = []
match = []
error = []
# if check_contam is on, create stats for contaminated and non-contaminated seqs in addition to stats for all seqs
if opts.check_contam:
cbases = []
cmatch = []
cerror = []
ncbases = []
ncmatch = []
ncerror = []
if opts.verbose: sys.stdout.write("Merging scores from %d bins ... "%len(finish))
for bid in finish:
bin_score = os.path.join(TMP_DIR, bid+'.score')
bin_log = os.path.join(TMP_DIR, bid+'.log')
if opts.logfile and os.path.isfile(bin_log):
lhdl = open(bin_log, 'rU')
write_file(lhdl.read(), opts.logfile, 1)
lhdl.close()
if os.path.isfile(bin_score):
shdl = open(bin_score, 'rU')
data = shdl.read().split("\n")
data_copy = list(data)
bases, match, error = process_data(data_copy, match, error)
if opts.check_contam:
if bid in contaminated_md5s:
data_copy = list(data)
cbases, cmatch, cerror = process_data(data_copy, cmatch, cerror)
else:
data_copy = list(data)
ncbases, ncmatch, ncerror = process_data(data_copy, ncmatch, ncerror)
shdl.close()
if opts.verbose: sys.stdout.write("Done\n")
if opts.check_contam:
contam = len(contaminated_md5s)
ncontam = len(finish) - len(contaminated_md5s)
contam_seq_count = 0
ncontam_seq_count = 0
for md5 in bins_to_seq_count:
if md5 in contaminated_md5s:
contam_seq_count += bins_to_seq_count[md5]
elif md5 in finish:
ncontam_seq_count += bins_to_seq_count[md5]
err_score, score_text = create_output(bases, match, error, 0)
write_file(score_text, out_stat)
if opts.check_contam:
if contam > 0:
cerr_score, cscore_text = create_output(cbases, cmatch, cerror, 0)
write_file(cscore_text, out_stat+".contaminated")
if ncontam > 0:
ncerr_score, ncscore_text = create_output(ncbases, ncmatch, ncerror, 0)
write_file(ncscore_text, out_stat+".non_contaminated")
if opts.percent:
_err, per_text = create_output(bases, match, error, 1)
write_file(per_text, out_stat+'.per')
if opts.check_contam:
if contam > 0:
_err, cper_text = create_output(cbases, cmatch, cerror, 1)
write_file(cper_text, out_stat+'.contaminated.per')
if ncontam > 0:
_err, ncper_text = create_output(ncbases, ncmatch, ncerror, 1)
write_file(ncper_text, out_stat+'.non_contaminated.per')
# cleanup
shutil.rmtree(TMP_DIR)
end_time = time.time() - start_time
summary = "\nCompleted in " + str(datetime.timedelta(seconds=end_time)) + "\n"
summary += "Input seqs\t" + str(seqmax) + "\n"
summary += "Processed bins\t" + str(size) + "\n"
summary += "Processed seqs\t" + str(total_ids) + "\n"
summary += "Drisee score\t" + str(err_score) + "\n"
write_file(summary, LOG_FILE, 1)
if opts.verbose: sys.stdout.write(summary)
if opts.check_contam and contam > 0:
contam_summary = "\nContam bins\t" + str(contam) + "\n"
contam_summary += "Contam seqs\t" + str(contam_seq_count) + "\n"
contam_summary += "Drisee score\t" + str(cerr_score) + "\n"
write_file(contam_summary, LOG_FILE, 1)
if opts.verbose: sys.stdout.write(contam_summary)
if opts.check_contam and ncontam > 0:
ncontam_summary = "\nNon-contam bins\t" + str(ncontam) + "\n"
ncontam_summary += "Non-contam seqs\t" + str(ncontam_seq_count) + "\n"
ncontam_summary += "Drisee score\t" + str(ncerr_score) + "\n"
write_file(ncontam_summary, LOG_FILE, 1)
if opts.verbose: sys.stdout.write(ncontam_summary)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-2-clause |
CCI-Tools/ect-core | cate/ds/local.py | 1 | 37425 | # The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
This plugin module adds the local data source to the data store registry.
Verification
============
The module's unit-tests are located in
`test/ds/test_esa_cci_ftp.py <https://github.com/CCI-Tools/cate/blob/master/test/ds/test_esa_cci_ftp.py>`_
and may be executed using ``$ py.test test/ds/test_esa_cci_ftp.py --cov=cate/ds/esa_cci_ftp.py``
for extra code coverage information.
Components
==========
"""
import json
import os
import re
import shutil
import socket
import uuid
import warnings
from collections import OrderedDict
from datetime import datetime
from glob import glob
from typing import Optional, Sequence, Union, Any, Tuple, List
from urllib.error import URLError, HTTPError
import psutil
import shapely.geometry
import xarray as xr
from dateutil import parser
from cate.conf import get_config_value, get_data_stores_path, GLOBAL_CONF_FILE
from cate.conf.defaults import NETCDF_COMPRESSION_LEVEL
from cate.core.ds import DATA_STORE_REGISTRY, DataAccessError, NetworkError, DataAccessWarning, DataSourceStatus, \
DataStore, DataSource, open_xarray_dataset, DataStoreNotice
from cate.core.opimpl import subset_spatial_impl, normalize_impl, adjust_spatial_attrs_impl
from cate.core.types import PolygonLike, TimeRange, TimeRangeLike, VarNames, VarNamesLike, ValidationError
from cate.util.monitor import Monitor
__author__ = "Norman Fomferra (Brockmann Consult GmbH), " \
"Marco Zühlke (Brockmann Consult GmbH), " \
"Chris Bernat (Telespazio VEGA UK Ltd), " \
"Paolo Pesciullesi (Telespazio VEGA UK Ltd)"
_REFERENCE_DATA_SOURCE_TYPE = "FILE_PATTERN"
_NAMESPACE = uuid.UUID(bytes=b"1234567890123456", version=3)
def get_data_store_path():
return os.environ.get('CATE_LOCAL_DATA_STORE_PATH',
os.path.join(get_data_stores_path(), 'local'))
def add_to_data_store_registry():
data_store = LocalDataStore('local', get_data_store_path())
DATA_STORE_REGISTRY.add_data_store(data_store)
# TODO (kbernat): document this class
class LocalDataSource(DataSource):
"""
Local Data Source implementation provides access to locally stored data sets.
:param ds_id: unique ID of data source
:param files:
:param data_store:
:param temporal_coverage:
:param spatial_coverage:
:param variables:
:param meta_info:
"""
def __init__(self,
ds_id: str,
files: Union[Sequence[str], OrderedDict],
data_store: 'LocalDataStore',
temporal_coverage: TimeRangeLike.TYPE = None,
spatial_coverage: PolygonLike.TYPE = None,
variables: VarNamesLike.TYPE = None,
meta_info: dict = None,
status: DataSourceStatus = None):
self._id = ds_id
if isinstance(files, Sequence):
self._files = OrderedDict.fromkeys(files)
else:
self._files = files
self._data_store = data_store
initial_temporal_coverage = TimeRangeLike.convert(temporal_coverage) if temporal_coverage else None
if not initial_temporal_coverage:
files_number = len(self._files.items())
if files_number > 0:
files_range = list(self._files.values())
if files_range:
if isinstance(files_range[0], Tuple):
initial_temporal_coverage = TimeRangeLike.convert(tuple([files_range[0][0],
files_range[files_number - 1][1]]))
elif isinstance(files_range[0], datetime):
initial_temporal_coverage = TimeRangeLike.convert((files_range[0],
files_range[files_number - 1]))
self._temporal_coverage = initial_temporal_coverage
self._spatial_coverage = PolygonLike.convert(spatial_coverage) if spatial_coverage else None
self._variables = VarNamesLike.convert(variables) if variables else []
self._meta_info = meta_info if meta_info else OrderedDict()
if self._variables and not self._meta_info.get('variables', None):
self._meta_info['variables'] = [
{'name': var_name,
'units': '',
'long_name': '',
'standard_name': ''
} for var_name in self._variables]
self._status = status if status else DataSourceStatus.READY
def _resolve_file_path(self, path) -> Sequence:
return glob(os.path.join(self._data_store.data_store_path, path))
def open_dataset(self,
time_range: TimeRangeLike.TYPE = None,
region: PolygonLike.TYPE = None,
var_names: VarNamesLike.TYPE = None,
protocol: str = None,
monitor: Monitor = Monitor.NONE) -> Any:
time_range = TimeRangeLike.convert(time_range) if time_range else None
var_names = VarNamesLike.convert(var_names) if var_names else None
paths = []
if time_range:
time_series = list(self._files.values())
file_paths = list(self._files.keys())
for i in range(len(time_series)):
if time_series[i]:
if isinstance(time_series[i], Tuple) and \
time_series[i][0] >= time_range[0] and \
time_series[i][1] <= time_range[1]:
paths.extend(self._resolve_file_path(file_paths[i]))
elif isinstance(time_series[i], datetime) and time_range[0] <= time_series[i] < time_range[1]:
paths.extend(self._resolve_file_path(file_paths[i]))
else:
for file in self._files.items():
paths.extend(self._resolve_file_path(file[0]))
if not paths:
raise self._empty_error(time_range)
paths = sorted(set(paths))
try:
excluded_variables = self._meta_info.get('exclude_variables')
if excluded_variables:
drop_variables = [variable.get('name') for variable in excluded_variables]
else:
drop_variables = None
# TODO: combine var_names and drop_variables
return open_xarray_dataset(paths,
region=region,
var_names=var_names,
drop_variables=drop_variables,
monitor=monitor)
except HTTPError as e:
raise self._cannot_access_error(time_range, region, var_names,
verb="open", cause=e) from e
except (URLError, socket.timeout) as e:
raise self._cannot_access_error(time_range, region, var_names,
verb="open", cause=e, error_cls=NetworkError) from e
except OSError as e:
raise self._cannot_access_error(time_range, region, var_names,
verb="open", cause=e) from e
@staticmethod
def _get_harmonized_coordinate_value(attrs: dict, attr_name: str):
value = attrs.get(attr_name, 'nan')
if isinstance(value, str):
return float(value.rstrip('degrees').rstrip('f'))
return value
def _make_local(self,
local_ds: 'LocalDataSource',
time_range: TimeRangeLike.TYPE = None,
region: PolygonLike.TYPE = None,
var_names: VarNamesLike.TYPE = None,
monitor: Monitor = Monitor.NONE):
local_id = local_ds.id
time_range = TimeRangeLike.convert(time_range) if time_range else None
var_names = VarNamesLike.convert(var_names) if var_names else None # type: Sequence
compression_level = get_config_value('NETCDF_COMPRESSION_LEVEL', NETCDF_COMPRESSION_LEVEL)
compression_enabled = True if compression_level > 0 else False
encoding_update = dict()
if compression_enabled:
encoding_update.update({'zlib': True, 'complevel': compression_level})
local_path = os.path.join(local_ds.data_store.data_store_path, local_id)
data_store_path = local_ds.data_store.data_store_path
if not os.path.exists(local_path):
os.makedirs(local_path)
monitor.start("Sync " + self.id, total_work=len(self._files.items()))
for remote_relative_filepath, coverage in self._files.items():
child_monitor = monitor.child(work=1)
file_name = os.path.basename(remote_relative_filepath)
local_relative_filepath = os.path.join(local_id, file_name)
local_absolute_filepath = os.path.join(data_store_path, local_relative_filepath)
remote_absolute_filepath = os.path.join(self._data_store.data_store_path, remote_relative_filepath)
if isinstance(coverage, Tuple):
time_coverage_start = coverage[0]
time_coverage_end = coverage[1]
if not time_range or time_coverage_start >= time_range[0] and time_coverage_end <= time_range[1]:
if region or var_names:
do_update_of_variables_meta_info_once = True
do_update_of_region_meta_info_once = True
remote_dataset = None
try:
remote_dataset = xr.open_dataset(remote_absolute_filepath)
if var_names:
remote_dataset = remote_dataset.drop(
[var_name for var_name in remote_dataset.data_vars.keys()
if var_name not in var_names])
if region:
remote_dataset = normalize_impl(remote_dataset)
remote_dataset = adjust_spatial_attrs_impl(subset_spatial_impl(remote_dataset, region),
allow_point=False)
if do_update_of_region_meta_info_once:
# subset_spatial_impl
local_ds.meta_info['bbox_maxx'] = remote_dataset.attrs['geospatial_lon_max']
local_ds.meta_info['bbox_minx'] = remote_dataset.attrs['geospatial_lon_min']
local_ds.meta_info['bbox_maxy'] = remote_dataset.attrs['geospatial_lat_max']
local_ds.meta_info['bbox_miny'] = remote_dataset.attrs['geospatial_lat_min']
do_update_of_region_meta_info_once = False
if compression_enabled:
for sel_var_name in remote_dataset.variables.keys():
remote_dataset.variables.get(sel_var_name).encoding.update(encoding_update)
remote_dataset.to_netcdf(local_absolute_filepath)
child_monitor.progress(work=1, msg=str(time_coverage_start))
finally:
if do_update_of_variables_meta_info_once and remote_dataset is not None:
variables_info = local_ds.meta_info.get('variables', [])
local_ds.meta_info['variables'] = [var_info for var_info in variables_info
if var_info.get('name')
in remote_dataset.variables.keys()
and var_info.get('name')
not in remote_dataset.dims.keys()]
# noinspection PyUnusedLocal
do_update_of_variables_meta_info_once = False
local_ds.add_dataset(os.path.join(local_id, file_name),
(time_coverage_start, time_coverage_end))
child_monitor.done()
else:
shutil.copy(remote_absolute_filepath, local_absolute_filepath)
local_ds.add_dataset(local_relative_filepath, (time_coverage_start, time_coverage_end))
child_monitor.done()
monitor.done()
return local_id
def make_local(self,
local_name: str,
local_id: str = None,
time_range: TimeRangeLike.TYPE = None,
region: PolygonLike.TYPE = None,
var_names: VarNamesLike.TYPE = None,
monitor: Monitor = Monitor.NONE) -> Optional[DataSource]:
time_range = TimeRangeLike.convert(time_range) if time_range else None
region = PolygonLike.convert(region) if region else None
var_names = VarNamesLike.convert(var_names) if var_names else None
local_store = DATA_STORE_REGISTRY.get_data_store('local')
if not local_store:
add_to_data_store_registry()
local_store = DATA_STORE_REGISTRY.get_data_store('local')
if not local_store:
raise ValueError('Cannot initialize `local` DataStore')
_uuid = LocalDataStore.generate_uuid(ref_id=self.id, time_range=time_range, region=region, var_names=var_names)
if not local_name or len(local_name) == 0:
local_name = "local.{}.{}".format(self.id, _uuid)
existing_ds_list = local_store.query(ds_id=local_name)
if len(existing_ds_list) == 1:
return existing_ds_list[0]
else:
existing_ds_list = local_store.query(ds_id='local.%s' % local_name)
if len(existing_ds_list) == 1:
if existing_ds_list[0].meta_info.get('uuid', None) == _uuid:
return existing_ds_list[0]
else:
raise ValueError('Datastore {} already contains dataset {}'.format(local_store.id, local_name))
local_meta_info = self.meta_info.copy()
local_meta_info['ref_uuid'] = local_meta_info.get('uuid', None)
local_meta_info['uuid'] = _uuid
local_ds = local_store.create_data_source(local_name, region, local_name,
time_range=time_range, var_names=var_names,
meta_info=self.meta_info.copy())
if local_ds:
if not local_ds.is_complete:
self._make_local(local_ds, time_range, region, var_names, monitor=monitor)
if local_ds.is_empty:
local_store.remove_data_source(local_ds)
return None
local_store.register_ds(local_ds)
return local_ds
return None
def add_dataset(self, file, time_coverage: TimeRangeLike.TYPE = None, update: bool = False,
extract_meta_info: bool = False):
if update or self._files.keys().isdisjoint([file]):
self._files[file] = time_coverage
if time_coverage:
self._extend_temporal_coverage(TimeRangeLike.convert(time_coverage))
self._files = OrderedDict(sorted(self._files.items(),
key=lambda f: f[1] if isinstance(f, Tuple) and f[1] else datetime.max))
if extract_meta_info:
try:
ds = xr.open_dataset(file)
self._meta_info.update(ds.attrs)
except OSError:
pass
self.save()
def _extend_temporal_coverage(self, time_interval: TimeRangeLike.TYPE):
"""
:param time_interval: Time range to be added to data source temporal coverage
:return:
"""
time_range = TimeRangeLike.convert(time_interval)
if not time_range or None in time_range:
return
if self._temporal_coverage and not (None in self._temporal_coverage):
if time_range[0] >= self._temporal_coverage[1]:
self._temporal_coverage = tuple([self._temporal_coverage[0], time_range[1]])
elif time_range[1] <= self._temporal_coverage[0]:
self._temporal_coverage = tuple([time_range[0], self._temporal_coverage[1]])
else:
self._temporal_coverage = time_range
self.save()
def update_temporal_coverage(self, time_range: TimeRangeLike.TYPE):
"""
:param time_range: Time range to be added to data source temporal coverage
:return:
"""
self._extend_temporal_coverage(TimeRangeLike.convert(time_range))
def _reduce_temporal_coverage(self, time_interval: TimeRangeLike.TYPE):
"""
:param time_interval: Time range to be removed from data source temporal coverage
:return:
"""
time_range = TimeRangeLike.convert(time_interval)
if not time_range or not self._temporal_coverage:
return
if time_range[0] > self._temporal_coverage[0] and time_range[1] == self._temporal_coverage[1]:
self._temporal_coverage = (self._temporal_coverage[0], time_range[0])
if time_range[1] < self._temporal_coverage[1] and time_range[0] == self._temporal_coverage[0]:
self._temporal_coverage = (time_range[1], self._temporal_coverage[1])
def reduce_temporal_coverage(self, time_coverage: TimeRangeLike.TYPE):
files_to_remove = []
time_range_to_be_removed = None
for file, time_range in self._files.items():
if time_coverage[0] <= time_range[0] <= time_coverage[1] \
and time_coverage[0] <= time_range[1] <= time_coverage[1]:
files_to_remove.append(file)
if not time_range_to_be_removed and isinstance(time_range, Tuple):
time_range_to_be_removed = time_range
else:
time_range_to_be_removed = (time_range_to_be_removed[0], time_range[1])
elif time_coverage[0] <= time_range[0] <= time_coverage[1]:
time_range_to_be_removed = (time_range_to_be_removed[0], time_range[0])
elif time_coverage[0] <= time_range[1] <= time_coverage[1]:
time_range_to_be_removed = time_range[1], time_coverage[1]
for file in files_to_remove:
os.remove(os.path.join(self._data_store.data_store_path, file))
del self._files[file]
if time_range_to_be_removed:
self._reduce_temporal_coverage(time_range_to_be_removed)
def save(self, unlock: bool = False):
self._data_store.save_data_source(self, unlock)
def temporal_coverage(self, monitor: Monitor = Monitor.NONE) -> Optional[TimeRange]:
return self._temporal_coverage
def spatial_coverage(self):
if not self._spatial_coverage and \
set(self._meta_info.keys()).issuperset({'bbox_minx', 'bbox_miny', 'bbox_maxx', 'bbox_maxy'}):
self._spatial_coverage = PolygonLike.convert(",".join([
self._meta_info.get('bbox_minx'),
self._meta_info.get('bbox_miny'),
self._meta_info.get('bbox_maxx'),
self._meta_info.get('bbox_maxy')])
)
return self._spatial_coverage
@property
def data_store(self) -> 'LocalDataStore':
return self._data_store
@property
def status(self) -> DataSourceStatus:
return self._status
@property
def id(self) -> str:
return self._id
@property
def meta_info(self) -> OrderedDict:
return self._meta_info
@property
def variables_info(self):
return self._meta_info.get('variables', [])
@property
def info_string(self):
return 'Files: %s' % (' '.join(self._files))
@property
def is_complete(self) -> bool:
"""
Return a DataSource creation state
:return:
"""
return self._status is DataSourceStatus.READY
@property
def is_empty(self) -> bool:
"""
Check if DataSource is empty
"""
return not self._files or len(self._files) == 0
def set_completed(self, state: bool):
"""
Sets state of DataSource completion
"""
if state:
self._status = DataSourceStatus.READY
else:
self._status = DataSourceStatus.PROCESSING
def _repr_html_(self):
import html
return '<table style="border:0;">\n' \
'<tr><td>Name</td><td><strong>%s</strong></td></tr>\n' \
'<tr><td>Files</td><td><strong>%s</strong></td></tr>\n' \
'</table>\n' % (html.escape(self._id), html.escape(' '.join(self._files)))
def to_json_dict(self):
"""
Return a JSON-serializable dictionary representation of this object.
:return: A JSON-serializable dictionary
"""
self._meta_info['status'] = self._status.name
config = OrderedDict({
'name': self._id,
'meta_info': self._meta_info,
'files': [[item[0], item[1][0], item[1][1]] if item[1] else [item[0]] for item in self._files.items()]
})
return config
@classmethod
def from_json_dict(cls, json_dict: dict, data_store: 'LocalDataStore') -> Optional['LocalDataSource']:
"""
Allows to deserialize (load from json) LocalDataSource object.
"""
name = json_dict.get('name')
files = json_dict.get('files', None)
variables = []
temporal_coverage = None
spatial_coverage = None
meta_info = json_dict.get('meta_info', OrderedDict())
meta_data = json_dict.get('meta_data', None)
if meta_data:
temporal_coverage = meta_data.get('temporal_coverage', meta_data.get('temporal_covrage', None))
spatial_coverage = meta_data.get('spatial_coverage', None)
variables = meta_data.get('variables', None)
if meta_info:
if not variables:
variables = [v.get('name') for v in meta_info.get('variables', dict()) if not v.get('name', None)]
if not temporal_coverage:
temporal_coverage_start = meta_info.get('temporal_coverage_start', None)
temporal_coverage_end = meta_info.get('temporal_coverage_end', None)
if temporal_coverage_start and temporal_coverage_end:
temporal_coverage = temporal_coverage_start, temporal_coverage_end
files_dict = OrderedDict()
if name and isinstance(files, list):
if len(files) > 0:
if isinstance(files[0], list):
file_details_length = len(files[0])
if file_details_length > 2:
files_dict = OrderedDict((item[0],
(parser.parse(item[1]).replace(microsecond=0),
parser.parse(item[2]).replace(microsecond=0))
if item[1] and item[2] else None)
for item in files)
elif file_details_length > 0:
files_dict = OrderedDict((item[0], parser.parse(item[1]).replace(microsecond=0))
if len(item) > 1 else (item[0], None) for item in files)
else:
files_dict = files
return LocalDataSource(name, files_dict, data_store, temporal_coverage, spatial_coverage, variables,
meta_info=meta_info)
class LocalDataStore(DataStore):
def __init__(self, ds_id: str, store_dir: str):
super().__init__(ds_id, title='Local Data Sources', is_local=True)
self._store_dir = store_dir
self._data_sources = None
@property
def description(self) -> Optional[str]:
"""
Return a human-readable description for this data store as plain text.
The text may use Markdown formatting.
"""
return ("The local data store represents "
"all the data sources in your local file system known by Cate. "
"It contains any downloaded remote data sources or files in your file system "
"manually added.")
@property
def notices(self) -> Optional[List[DataStoreNotice]]:
"""
Return an optional list of notices for this data store that can be used to inform users about the
conventions, standards, and data extent used in this data store or upcoming service outages.
"""
return [
DataStoreNotice("localDataStorage",
"Local Data Storage",
"The local data store is currently configured to synchronize remote data in the "
f"`{get_data_stores_path()}`.\n"
"You can change this location either "
f"in Cate's configuration file `{GLOBAL_CONF_FILE}` "
"or in the user preference settings of Cate Desktop.\n"
"In order to keep your data, move your old directory to the new location, before "
"changing the location.",
intent="primary",
icon="info-sign"),
]
def add_pattern(self, data_source_id: str, files: Union[str, Sequence[str]] = None) -> 'DataSource':
data_source = self.create_data_source(data_source_id)
if isinstance(files, str) and len(files) > 0:
files = [files]
is_first_file = True
if not files:
raise ValueError("files pattern cannot be empty")
for file in files:
if is_first_file:
data_source.add_dataset(file, extract_meta_info=True)
is_first_file = False
else:
data_source.add_dataset(file)
self.register_ds(data_source)
return data_source
def remove_data_source(self, data_source: Union[str, DataSource], remove_files: bool = True):
if isinstance(data_source, str):
data_sources = self.query(ds_id=data_source)
if not data_sources or len(data_sources) != 1:
return
data_source = data_sources[0]
file_name = os.path.join(self._store_dir, data_source.id + '.json')
if os.path.isfile(file_name):
os.remove(file_name)
lock_file = os.path.join(self._store_dir, data_source.id + '.lock')
if os.path.isfile(lock_file):
os.remove(lock_file)
if remove_files:
data_source_path = os.path.join(self._store_dir, data_source.id)
if os.path.isdir(data_source_path):
shutil.rmtree(os.path.join(self._store_dir, data_source.id), ignore_errors=True)
if data_source in self._data_sources:
self._data_sources.remove(data_source)
def register_ds(self, data_source: LocalDataSource):
data_source.set_completed(True)
self._data_sources.append(data_source)
@classmethod
def generate_uuid(cls, ref_id: str,
time_range: Optional[TimeRange] = None,
region: Optional[shapely.geometry.Polygon] = None,
var_names: Optional[VarNames] = None) -> str:
if time_range:
ref_id += TimeRangeLike.format(time_range)
if region:
ref_id += PolygonLike.format(region)
if var_names:
ref_id += VarNamesLike.format(var_names)
return str(uuid.uuid3(_NAMESPACE, ref_id))
@classmethod
def generate_title(cls, title: str,
time_range: Optional[TimeRange] = None,
region: Optional[shapely.geometry.Polygon] = None,
var_names: Optional[VarNames] = None) -> str:
if time_range:
title += " [TimeRange:{}]".format(TimeRangeLike.format(time_range))
if region:
title += " [Region:{}]".format(PolygonLike.format(region))
if var_names:
title += " [Variables:{}]".format(VarNamesLike.format(var_names))
return title
def create_data_source(self, data_source_id: str, region: PolygonLike.TYPE = None,
title: str = None,
time_range: TimeRangeLike.TYPE = None, var_names: VarNamesLike.TYPE = None,
meta_info: OrderedDict = None, lock_file: bool = False):
self._init_data_sources()
if title:
if not meta_info:
meta_info = OrderedDict()
meta_info['title'] = title
if not re.match(r'^[a-zA-Z0-9_.\-]*$', data_source_id):
raise ValidationError('Unaccepted characters in data source name "{}"'.format(data_source_id),
hint='Use only letters, numbers, dots or underscore in the data source name')
if not data_source_id.startswith('%s.' % self.id):
data_source_id = '%s.%s' % (self.id, data_source_id)
lock_filename = '{}.lock'.format(data_source_id)
lock_filepath = os.path.join(self._store_dir, lock_filename)
pid = os.getpid()
create_time = int(psutil.Process(pid).create_time() * 1000000)
data_source = None
for ds in self._data_sources:
if ds.id == data_source_id:
if lock_file and os.path.isfile(lock_filepath):
with open(lock_filepath, 'r') as lock_file:
writer_pid = lock_file.readline()
if writer_pid:
writer_create_time = -1
writer_pid, writer_timestamp = [(int(val) for val in writer_pid.split(":"))
if ":" in writer_pid else writer_pid, writer_create_time]
if psutil.pid_exists(writer_pid) and writer_pid != pid:
if writer_timestamp > writer_create_time:
writer_create_time = int(psutil.Process(writer_pid).create_time() * 1000000)
if writer_create_time == writer_timestamp:
raise DataAccessError(f'Data source "{data_source_id}" is'
f' currently being created by another'
f' process (PID {writer_pid})')
# ds.temporal_coverage() == time_range and
if ds.spatial_coverage() == region \
and ds.variables_info == var_names:
data_source = ds
data_source.set_completed(False)
break
raise DataAccessError(f'Data source "{data_source_id}" already exists.')
if not data_source:
data_source = LocalDataSource(data_source_id, files=[], data_store=self, spatial_coverage=region,
variables=var_names, temporal_coverage=time_range, meta_info=meta_info,
status=DataSourceStatus.PROCESSING)
data_source.set_completed(False)
self._save_data_source(data_source)
if lock_file:
with open(lock_filepath, 'w') as lock_file:
lock_file.write("{}:{}".format(pid, create_time))
return data_source
@property
def data_store_path(self):
"""Path to directory that stores the local data source files."""
return self._store_dir
def query(self, ds_id: str = None, query_expr: str = None, monitor: Monitor = Monitor.NONE) \
-> Sequence[LocalDataSource]:
self._init_data_sources()
if ds_id or query_expr:
return [ds for ds in self._data_sources if ds.matches(ds_id=ds_id, query_expr=query_expr)]
return self._data_sources
def __repr__(self):
return "LocalFilePatternDataStore(%s)" % repr(self.id)
def _repr_html_(self):
self._init_data_sources()
rows = []
row_count = 0
for data_source in self._data_sources:
row_count += 1
# noinspection PyProtectedMember
rows.append('<tr><td><strong>%s</strong></td><td>%s</td></tr>' % (row_count, data_source._repr_html_()))
return '<p>Contents of LocalFilePatternDataStore "%s"</p><table>%s</table>' % (self.id, '\n'.join(rows))
def _init_data_sources(self, skip_broken: bool = True):
"""
:param skip_broken: In case of broken data sources skip loading and log warning instead of rising Error.
:return:
"""
if self._data_sources:
return
os.makedirs(self._store_dir, exist_ok=True)
json_files = [f for f in os.listdir(self._store_dir)
if os.path.isfile(os.path.join(self._store_dir, f)) and f.endswith('.json')]
unfinished_ds = [f for f in os.listdir(self._store_dir)
if os.path.isfile(os.path.join(self._store_dir, f)) and f.endswith('.lock')]
if skip_broken:
json_files = [f for f in json_files if f.replace('.json', '.lock') not in unfinished_ds]
self._data_sources = []
for json_file in json_files:
try:
data_source = self._load_data_source(os.path.join(self._store_dir, json_file))
if data_source:
self._data_sources.append(data_source)
except DataAccessError as e:
if skip_broken:
warnings.warn(str(e), DataAccessWarning, stacklevel=0)
else:
raise e
def save_data_source(self, data_source, unlock: bool = False):
self._save_data_source(data_source)
if unlock:
lock_file = os.path.join(self._store_dir, data_source.id + '.lock')
if os.path.isfile(lock_file):
os.remove(lock_file)
def _save_data_source(self, data_source):
json_dict = data_source.to_json_dict()
dump_kwargs = dict(indent=' ', default=self._json_default_serializer)
file_name = os.path.join(self._store_dir, data_source.id + '.json')
try:
with open(file_name, 'w') as fp:
json.dump(json_dict, fp, **dump_kwargs)
except EnvironmentError as e:
raise DataAccessError(f"Couldn't save data source configuration file {file_name}") from e
def _load_data_source(self, json_path):
json_dict = self._load_json_file(json_path)
if json_dict:
return LocalDataSource.from_json_dict(json_dict, self)
def invalidate(self):
self._data_sources = None
self._init_data_sources()
@staticmethod
def _load_json_file(json_path: str):
if os.path.isfile(json_path):
try:
with open(json_path) as fp:
return json.load(fp=fp) or {}
except json.decoder.JSONDecodeError as e:
raise DataAccessError(f"Cannot load data source configuration from {json_path}") from e
else:
raise DataAccessError(f"Data source configuration does not exists: {json_path}")
@staticmethod
def _json_default_serializer(obj):
if isinstance(obj, datetime):
return obj.replace(microsecond=0).isoformat()
else:
warnings.warn(f'Not sure how to serialize {obj} of {type(obj)}')
return str(obj)
| mit |
popazerty/e2 | lib/python/Plugins/SystemPlugins/RemoteControlCode/plugin.py | 4 | 4307 | from enigma import ePicLoad
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.ConfigList import ConfigListScreen
from Components.config import config, configfile, ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.Pixmap import Pixmap
from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN
from os import path as os_path
modelist = [
("0", _("All supported")),
("5", _("Beyonwiz T3 (0xABCD)")),
("10", _("Beyonwiz T3 alternate (0xAE97)")),
("6", _("Beyonwiz (0x02F2)")),
("7", _("Beyonwiz (0x02F3)")),
("8", _("Beyonwiz (0x02F4)")),
# ("1", _("INI3000 (0x0932)")),
# ("2", _("INI7000 (0x0831")),
("3", _("HDx (0x0933)")),
# ("4", _("MIRACLEBOX (0x02F9)")),
# ("9", _("YHGD2580 (0x08F7)")),
]
config.plugins.RCSetup = ConfigSubsection()
config.plugins.RCSetup.mode = ConfigSelection(choices=modelist, default="0")
def applySettings():
f = open("/proc/stb/ir/rc/type", "w")
f.write("%d" % int(config.plugins.RCSetup.mode.value))
f.close()
class RCSetupScreen(Screen, ConfigListScreen):
    """Setup screen for choosing a remote-control code.

    Shows a preview image of the matching remote, applies the selected
    code immediately, and asks for confirmation — rolling back to the
    last known-good code if the user cannot answer (e.g. because the new
    code made the remote unusable).
    """

    def __init__(self, session):
        Screen.__init__(self, session)
        Screen.setTitle(self, _("Remote control code settings"))
        self.previewPath = ""
        self.list = []
        ConfigListScreen.__init__(self, self.list)
        self["key_red"] = Label(_("Exit"))
        self["key_green"] = Label(_("Save"))
        self["Preview"] = Pixmap()
        self["actions"] = ActionMap(["SetupActions", "ColorActions"], {
            "ok": self.keyGo,
            "save": self.keyGo,
            "cancel": self.keyCancel,
            "green": self.keyGo,
            "red": self.keyCancel,
        }, -2)
        # Work on a local ConfigSelection so nothing is persisted until keyGo().
        self.mode = ConfigSelection(choices=modelist, default=config.plugins.RCSetup.mode.value)
        self.list.append(getConfigListEntry(_("Remote"), self.mode))
        self["config"].list = self.list
        self["config"].l.setList(self.list)
        self.grabLastGoodMode()
        self.picload = ePicLoad()
        self.picload.PictureData.get().append(self.showPic)
        self.current_sel = self["config"].getCurrent()[1]
        self.onLayoutFinish.append(self.layoutFinished)

    def showPic(self, picInfo=""):
        """ePicLoad callback: blit the decoded preview image into the widget."""
        ptr = self.picload.getData()
        if ptr is not None:
            self["Preview"].instance.setPixmap(ptr.__deref__())
            self["Preview"].show()

    def layoutFinished(self):
        """Size the picture decoder to the preview widget and load the image."""
        self.picload.setPara((self["Preview"].instance.size().width(), self["Preview"].instance.size().height(), 1.0, 1, 1, 1, "#FF000000"))
        self.loadPreview()

    def grabLastGoodMode(self):
        """Remember the currently active code as the rollback target."""
        self.last_good = config.plugins.RCSetup.mode.value

    def keyGo(self):
        """Apply the selected code; ask for confirmation if it changed."""
        config.plugins.RCSetup.mode.value = self.mode.value
        applySettings()
        RC = config.plugins.RCSetup.mode.value
        if (RC) != self.last_good:
            from Screens.MessageBox import MessageBox
            # Times out to "no" so an unusable remote rolls back automatically.
            self.session.openWithCallback(self.confirm, MessageBox, _("Is this remote OK?"), MessageBox.TYPE_YESNO, timeout=15, default=False)
        else:
            config.plugins.RCSetup.save()
            self.close()

    def confirm(self, confirmed):
        """MessageBox callback: keep the new code, or roll back to last good."""
        if not confirmed:
            # BUGFIX: restore the full last-good value.  last_good is a
            # string such as "10"; indexing [0] truncated it to "1" and
            # restored the wrong remote code.
            config.plugins.RCSetup.mode.value = self.last_good
            applySettings()
        else:
            applySettings()
            config.plugins.RCSetup.mode.save()
            configfile.save()
            self.keySave()

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)
        self.current_sel = self["config"].getCurrent()[1]
        self.loadPreview()

    def keyRight(self):
        ConfigListScreen.keyRight(self)
        self.current_sel = self["config"].getCurrent()[1]
        self.loadPreview()

    def keyCancel(self):
        """Abort: re-apply the stored (unchanged) code and close the screen."""
        applySettings()
        self.close()

    def loadPreview(self):
        """Show the remote image matching the highlighted choice."""
        root = "/usr/lib/enigma2/python/Plugins/SystemPlugins/RemoteControlCode/img/ini"
        pngpath = root + self.current_sel.value + "/rc.png"
        if not os_path.exists(pngpath):
            pngpath = resolveFilename(SCOPE_ACTIVE_SKIN, "noprev.png")
        if self.previewPath != pngpath:
            self.previewPath = pngpath
            self.picload.startDecode(self.previewPath)
def main(session, **kwargs):
    """Plugin entry point: open the remote-control setup screen."""
    session.open(RCSetupScreen)
def RemoteControlSetup(menuid, **kwargs):
    """Menu hook: expose the setup entry only under the "system" menu."""
    if menuid != "system":
        return []
    return [(_("Remote Control Code"), main, "remotecontrolcode", 50)]
def Plugins(**kwargs):
    """Enigma2 plugin registration; re-applies the saved RC code at startup."""
    if not os_path.exists("/proc/stb/ir/rc/type"):
        # Hardware without a configurable IR decoder: register nothing.
        return []
    applySettings()
    from Plugins.Plugin import PluginDescriptor
    return [PluginDescriptor(name=_("Remote Control Code"), where=PluginDescriptor.WHERE_MENU, needsRestart=False, fnc=RemoteControlSetup)]
| gpl-2.0 |
sharma1nitish/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py | 120 | 2649 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from .committervalidator import CommitterValidator
class CommitterValidatorTest(unittest.TestCase):
    """Unit tests for CommitterValidator's permission-rejection messaging."""

    def test_flag_permission_rejection_message(self):
        # Validator backed by a MockHost so no real checkout is needed.
        validator = CommitterValidator(MockHost())
        self.assertEqual(validator._committers_py_path(), "Tools/Scripts/webkitpy/common/config/committers.py")
        # NOTE(review): variable name carries a typo ("messsage"); kept as-is
        # since this edit only adds documentation.
        expected_messsage = """foo@foo.com does not have review permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
- If you do not have review rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
- If you have review rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your review rights."""
        self.assertMultiLineEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_messsage)
| bsd-3-clause |
optima-ict/odoo | addons/account_asset/account_asset.py | 5 | 28027 | # -*- coding: utf-8 -*-
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from openerp import api, fields, models, _
from openerp.exceptions import UserError, ValidationError
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
class AccountAssetCategory(models.Model):
    """Asset category: a reusable template of depreciation settings
    (accounts, journal, computation method) shared by the assets that
    reference it.
    """
    _name = 'account.asset.category'
    _description = 'Asset category'

    active = fields.Boolean(default=True)
    name = fields.Char(required=True, index=True, string="Asset Type")
    # Accounting configuration: which accounts/journal the generated
    # depreciation (or revenue-recognition) entries are posted on.
    account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account', domain=[('account_type', '=', 'normal')])
    account_asset_id = fields.Many2one('account.account', string='Asset Account', required=True, domain=[('internal_type','=','other'), ('deprecated', '=', False)])
    account_income_recognition_id = fields.Many2one('account.account', string='Recognition Income Account', domain=[('internal_type','=','other'), ('deprecated', '=', False)], oldname='account_expense_depreciation_id')
    account_depreciation_id = fields.Many2one('account.account', string='Depreciation Account', required=True, domain=[('internal_type','=','other'), ('deprecated', '=', False)])
    journal_id = fields.Many2one('account.journal', string='Journal', required=True)
    company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env['res.company']._company_default_get('account.asset.category'))
    # Depreciation computation parameters (copied onto new assets).
    method = fields.Selection([('linear', 'Linear'), ('degressive', 'Degressive')], string='Computation Method', required=True, default='linear',
        help="Choose the method to use to compute the amount of depreciation lines.\n"
            "  * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n"
            "  * Degressive: Calculated on basis of: Residual Value * Degressive Factor")
    method_number = fields.Integer(string='Number of Depreciations', default=5, help="The number of depreciations needed to depreciate your asset")
    method_period = fields.Integer(string='Period Length', default=1, help="State here the time between 2 depreciations, in months", required=True)
    method_progress_factor = fields.Float('Degressive Factor', default=0.3)
    method_time = fields.Selection([('number', 'Number of Depreciations'), ('end', 'Ending Date')], string='Time Method', required=True, default='number',
        help="Choose the method to use to compute the dates and number of depreciation lines.\n"
            "  * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n"
            "  * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond.")
    method_end = fields.Date('Ending date')
    prorata = fields.Boolean(string='Prorata Temporis', help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first of January')
    open_asset = fields.Boolean(string='Post Journal Entries', help="Check this if you want to automatically confirm the assets of this category when created by invoices.")
    type = fields.Selection([('sale', 'Sale: Revenue Recognition'), ('purchase', 'Purchase: Asset')], required=True, index=True, default='purchase')

    @api.onchange('type')
    def onchange_type(self):
        """Adjust defaults when switching direction: revenue recognition
        (sale) is prorated monthly, assets (purchase) default to yearly.
        """
        if self.type == 'sale':
            self.prorata = True
            self.method_period = 1
        else:
            self.method_period = 12
class AccountAssetAsset(models.Model):
    """An asset (or deferred revenue) whose value is spread over time.

    Lifecycle: draft -> open (running, depreciation board computed and
    posted over time) -> close (fully depreciated or disposed).
    """
    _name = 'account.asset.asset'
    _description = 'Asset/Revenue Recognition'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    account_move_ids = fields.One2many('account.move', 'asset_id', string='Entries', readonly=True, states={'draft': [('readonly', False)]})
    entry_count = fields.Integer(compute='_entry_count', string='# Asset Entries')
    name = fields.Char(string='Asset Name', required=True, readonly=True, states={'draft': [('readonly', False)]})
    code = fields.Char(string='Reference', size=32, readonly=True, states={'draft': [('readonly', False)]})
    value = fields.Float(string='Gross Value', required=True, readonly=True, digits=0, states={'draft': [('readonly', False)]}, oldname='purchase_value')
    currency_id = fields.Many2one('res.currency', string='Currency', required=True, readonly=True, states={'draft': [('readonly', False)]},
        default=lambda self: self.env.user.company_id.currency_id.id)
    company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, states={'draft': [('readonly', False)]},
        default=lambda self: self.env['res.company']._company_default_get('account.asset.asset'))
    note = fields.Text()
    category_id = fields.Many2one('account.asset.category', string='Category', required=True, change_default=True, readonly=True, states={'draft': [('readonly', False)]})
    date = fields.Date(string='Date', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=fields.Date.context_today, oldname="purchase_date")
    state = fields.Selection([('draft', 'Draft'), ('open', 'Running'), ('close', 'Close')], 'Status', required=True, copy=False, default='draft',
        help="When an asset is created, the status is 'Draft'.\n"
            "If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n"
            "You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status.")
    active = fields.Boolean(default=True)
    partner_id = fields.Many2one('res.partner', string='Partner', readonly=True, states={'draft': [('readonly', False)]})
    # Depreciation parameters; defaults come from the category (see
    # onchange_category_id) and are frozen once the asset is confirmed.
    method = fields.Selection([('linear', 'Linear'), ('degressive', 'Degressive')], string='Computation Method', required=True, readonly=True, states={'draft': [('readonly', False)]}, default='linear',
        help="Choose the method to use to compute the amount of depreciation lines.\n  * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n"
            "  * Degressive: Calculated on basis of: Residual Value * Degressive Factor")
    method_number = fields.Integer(string='Number of Depreciations', readonly=True, states={'draft': [('readonly', False)]}, default=5, help="The number of depreciations needed to depreciate your asset")
    method_period = fields.Integer(string='Number of Months in a Period', required=True, readonly=True, default=12, states={'draft': [('readonly', False)]},
        help="The amount of time between two depreciations, in months")
    method_end = fields.Date(string='Ending Date', readonly=True, states={'draft': [('readonly', False)]})
    method_progress_factor = fields.Float(string='Degressive Factor', readonly=True, default=0.3, states={'draft': [('readonly', False)]})
    value_residual = fields.Float(compute='_amount_residual', method=True, digits=0, string='Residual Value')
    method_time = fields.Selection([('number', 'Number of Depreciations'), ('end', 'Ending Date')], string='Time Method', required=True, readonly=True, default='number', states={'draft': [('readonly', False)]},
        help="Choose the method to use to compute the dates and number of depreciation lines.\n"
            "  * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n"
            "  * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond.")
    prorata = fields.Boolean(string='Prorata Temporis', readonly=True, states={'draft': [('readonly', False)]},
        help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January / Start date of fiscal year')
    depreciation_line_ids = fields.One2many('account.asset.depreciation.line', 'asset_id', string='Depreciation Lines', readonly=True, states={'draft': [('readonly', False)], 'open': [('readonly', False)]})
    salvage_value = fields.Float(string='Salvage Value', digits=0, readonly=True, states={'draft': [('readonly', False)]},
        help="It is the amount you plan to have that you cannot depreciate.")
    invoice_id = fields.Many2one('account.invoice', string='Invoice', states={'draft': [('readonly', False)]}, copy=False)
    type = fields.Selection(related="category_id.type", string='Type', required=True)

    @api.multi
    def unlink(self):
        """Refuse deletion of confirmed assets or assets with posted entries."""
        for asset in self:
            if asset.state in ['open', 'close']:
                raise UserError(_('You cannot delete a document is in %s state.') % (asset.state,))
            if asset.account_move_ids:
                raise UserError(_('You cannot delete a document that contains posted entries.'))
        return super(AccountAssetAsset, self).unlink()

    @api.multi
    def _get_last_depreciation_date(self):
        """
        @param id: ids of a account.asset.asset objects
        @return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset
        """
        # Raw SQL keeps this a single query over all requested assets.
        self.env.cr.execute("""
            SELECT a.id as id, COALESCE(MAX(m.date),a.date) AS date
            FROM account_asset_asset a
            LEFT JOIN account_move m ON (m.asset_id = a.id)
            WHERE a.id IN %s
            GROUP BY a.id, a.date """, (tuple(self.ids),))
        result = dict(self.env.cr.fetchall())
        return result

    @api.model
    def _cron_generate_entries(self):
        """Scheduled action: post due depreciation lines of all running assets."""
        assets = self.env['account.asset.asset'].search([('state', '=', 'open')])
        assets._compute_entries(datetime.today())

    def _compute_board_amount(self, sequence, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date):
        """Return the depreciation amount for one board line.

        The last line (sequence == undone_dotation_number) absorbs the full
        remaining residual so rounding never leaves a leftover; otherwise the
        amount follows the linear or degressive formula, prorated for the
        first/last line when prorata temporis applies.
        """
        amount = 0
        if sequence == undone_dotation_number:
            amount = residual_amount
        else:
            if self.method == 'linear':
                amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
                if self.prorata and self.category_id.type == 'purchase':
                    amount = amount_to_depr / self.method_number
                    # days remaining in the year after the depreciation date
                    days = total_days - float(depreciation_date.strftime('%j'))
                    if sequence == 1:
                        amount = (amount_to_depr / self.method_number) / total_days * days
                    elif sequence == undone_dotation_number:
                        amount = (amount_to_depr / self.method_number) / total_days * (total_days - days)
            elif self.method == 'degressive':
                amount = residual_amount * self.method_progress_factor
                if self.prorata:
                    days = total_days - float(depreciation_date.strftime('%j'))
                    if sequence == 1:
                        amount = (residual_amount * self.method_progress_factor) / total_days * days
                    elif sequence == undone_dotation_number:
                        amount = (residual_amount * self.method_progress_factor) / total_days * (total_days - days)
        return amount

    def _compute_board_undone_dotation_nb(self, depreciation_date, total_days):
        """Return how many depreciation lines remain to be generated.

        For the 'end' time method, count whole periods until method_end;
        otherwise use method_number.  Prorata purchases get one extra line
        because the first line only covers part of a period.
        """
        undone_dotation_number = self.method_number
        if self.method_time == 'end':
            end_date = datetime.strptime(self.method_end, DF).date()
            undone_dotation_number = 0
            while depreciation_date <= end_date:
                depreciation_date = date(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+self.method_period)
                undone_dotation_number += 1
        if self.prorata and self.category_id.type == 'purchase':
            undone_dotation_number += 1
        return undone_dotation_number

    @api.multi
    def compute_depreciation_board(self):
        """Rebuild the depreciation schedule for this (single) asset.

        Posted lines are kept untouched; unposted ones are dropped and
        regenerated from the current residual value and parameters.
        """
        self.ensure_one()
        posted_depreciation_line_ids = self.depreciation_line_ids.filtered(lambda x: x.move_check)
        unposted_depreciation_line_ids = self.depreciation_line_ids.filtered(lambda x: not x.move_check)
        # Remove old unposted depreciation lines. We cannot use unlink() with One2many field
        commands = [(2, line_id.id, False) for line_id in unposted_depreciation_line_ids]
        if self.value_residual != 0.0:
            amount_to_depr = residual_amount = self.value_residual
            if self.prorata:
                depreciation_date = datetime.strptime(self._get_last_depreciation_date()[self.id], DF).date()
            else:
                # depreciation_date = 1st of January of purchase year
                asset_date = datetime.strptime(self.date, DF).date()
                # if we already have some previous validated entries, starting date isn't 1st January but last entry + method period
                if posted_depreciation_line_ids and posted_depreciation_line_ids[0].depreciation_date:
                    last_depreciation_date = datetime.strptime(posted_depreciation_line_ids[0].depreciation_date, DF).date()
                    depreciation_date = last_depreciation_date + relativedelta(months=+self.method_period)
                else:
                    depreciation_date = asset_date
            day = depreciation_date.day
            month = depreciation_date.month
            year = depreciation_date.year
            # NOTE(review): simple mod-4 leap-year test — wrong for century
            # years (1900, 2100); kept as-is since this edit is doc-only.
            total_days = (year % 4) and 365 or 366
            undone_dotation_number = self._compute_board_undone_dotation_nb(depreciation_date, total_days)
            for x in range(len(posted_depreciation_line_ids), undone_dotation_number):
                sequence = x + 1
                amount = self._compute_board_amount(sequence, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date)
                amount = self.currency_id.round(amount)
                residual_amount -= amount
                vals = {
                    'amount': amount,
                    'asset_id': self.id,
                    'sequence': sequence,
                    'name': (self.code or '') + '/' + str(sequence),
                    'remaining_value': residual_amount,
                    'depreciated_value': self.value - (self.salvage_value + residual_amount),
                    'depreciation_date': depreciation_date.strftime(DF),
                }
                commands.append((0, False, vals))
                # Considering Depr. Period as months
                depreciation_date = date(year, month, day) + relativedelta(months=+self.method_period)
                day = depreciation_date.day
                month = depreciation_date.month
                year = depreciation_date.year
        self.write({'depreciation_line_ids': commands})
        return True

    @api.multi
    def validate(self):
        """Confirm the asset (draft -> open) and log the frozen parameters
        in the chatter for traceability."""
        self.write({'state': 'open'})
        # NOTE(review): this local deliberately shadows the 'fields' module
        # import inside the method body.
        fields = [
            'method',
            'method_number',
            'method_period',
            'method_end',
            'method_progress_factor',
            'method_time',
            'salvage_value',
            'invoice_id',
        ]
        ref_tracked_fields = self.env['account.asset.asset'].fields_get(fields)
        for asset in self:
            tracked_fields = ref_tracked_fields.copy()
            # Only log the parameters relevant to the chosen method.
            if asset.method == 'linear':
                del(tracked_fields['method_progress_factor'])
            if asset.method_time != 'end':
                del(tracked_fields['method_end'])
            else:
                del(tracked_fields['method_number'])
            dummy, tracking_value_ids = asset._message_track(tracked_fields, dict.fromkeys(fields))
            asset.message_post(subject=_('Asset created'), tracking_value_ids=tracking_value_ids)

    @api.multi
    def set_to_close(self):
        """Dispose of the assets: replace remaining unposted lines with one
        final line for the whole residual and open the draft disposal move(s).
        """
        move_ids = []
        for asset in self:
            unposted_depreciation_line_ids = asset.depreciation_line_ids.filtered(lambda x: not x.move_check)
            if unposted_depreciation_line_ids:
                old_values = {
                    'method_end': asset.method_end,
                    'method_number': asset.method_number,
                }
                # Remove all unposted depr. lines
                commands = [(2, line_id.id, False) for line_id in unposted_depreciation_line_ids]
                # Create a new depr. line with the residual amount and post it
                sequence = len(asset.depreciation_line_ids) - len(unposted_depreciation_line_ids) + 1
                today = datetime.today().strftime(DF)
                vals = {
                    'amount': asset.value_residual,
                    'asset_id': asset.id,
                    'sequence': sequence,
                    'name': (asset.code or '') + '/' + str(sequence),
                    'remaining_value': 0,
                    'depreciated_value': asset.value - asset.salvage_value,  # the asset is completely depreciated
                    'depreciation_date': today,
                }
                commands.append((0, False, vals))
                asset.write({'depreciation_line_ids': commands, 'method_end': today, 'method_number': sequence})
                tracked_fields = self.env['account.asset.asset'].fields_get(['method_number', 'method_end'])
                changes, tracking_value_ids = asset._message_track(tracked_fields, old_values)
                if changes:
                    asset.message_post(subject=_('Asset sold or disposed. Accounting entry awaiting for validation.'), tracking_value_ids=tracking_value_ids)
                move_ids += asset.depreciation_line_ids[-1].create_move(post_move=False)
        if move_ids:
            # Open the generated draft move(s) so the user can validate them.
            name = _('Disposal Move')
            view_mode = 'form'
            if len(move_ids) > 1:
                name = _('Disposal Moves')
                view_mode = 'tree,form'
            return {
                'name': name,
                'view_type': 'form',
                'view_mode': view_mode,
                'res_model': 'account.move',
                'type': 'ir.actions.act_window',
                'target': 'current',
                'res_id': move_ids[0],
            }

    @api.multi
    def set_to_draft(self):
        """Reset the asset(s) back to the editable draft state."""
        self.write({'state': 'draft'})

    @api.one
    @api.depends('value', 'salvage_value', 'depreciation_line_ids')
    def _amount_residual(self):
        """Residual = gross value minus posted depreciation minus salvage."""
        total_amount = 0.0
        for line in self.depreciation_line_ids:
            if line.move_check:
                total_amount += line.amount
        self.value_residual = self.value - total_amount - self.salvage_value

    @api.onchange('company_id')
    def onchange_company_id(self):
        """Keep the asset currency aligned with the company currency."""
        self.currency_id = self.company_id.currency_id.id

    @api.multi
    @api.depends('account_move_ids')
    def _entry_count(self):
        """Compute the number of journal entries linked to each asset."""
        for asset in self:
            asset.entry_count = self.env['account.move'].search_count([('asset_id', '=', asset.id)])

    @api.one
    @api.constrains('prorata', 'method_time')
    def _check_prorata(self):
        """Prorata only makes sense with a fixed number of depreciations."""
        if self.prorata and self.method_time != 'number':
            raise ValidationError(_('Prorata temporis can be applied only for time method "number of depreciations".'))

    @api.onchange('category_id')
    def onchange_category_id(self):
        """Copy the category's depreciation parameters onto the asset."""
        vals = self.onchange_category_id_values(self.category_id.id)
        # We cannot use 'write' on an object that doesn't exist yet
        if vals:
            for k, v in vals['value'].iteritems():
                setattr(self, k, v)

    def onchange_category_id_values(self, category_id):
        """Return the old-API onchange dict of defaults for *category_id*."""
        if category_id:
            category = self.env['account.asset.category'].browse(category_id)
            return {
                'value': {
                    'method': category.method,
                    'method_number': category.method_number,
                    'method_time': category.method_time,
                    'method_period': category.method_period,
                    'method_progress_factor': category.method_progress_factor,
                    'method_end': category.method_end,
                    'prorata': category.prorata,
                }
            }

    @api.onchange('method_time')
    def onchange_method_time(self):
        """Disable prorata when the time method no longer supports it."""
        if self.method_time != 'number':
            self.prorata = False

    @api.multi
    def copy_data(self, default=None):
        """Duplicate with a ' (copy)' suffix so the name stays unique-ish."""
        if default is None:
            default = {}
        default['name'] = self.name + _(' (copy)')
        return super(AccountAssetAsset, self).copy_data(default)[0]

    @api.multi
    def _compute_entries(self, date):
        """Create (and post) the moves for all lines due on or before *date*."""
        depreciation_ids = self.env['account.asset.depreciation.line'].search([
            ('asset_id', 'in', self.ids), ('depreciation_date', '<=', date),
            ('move_check', '=', False)])
        return depreciation_ids.create_move()

    @api.model
    def create(self, vals):
        """Create the asset and immediately compute its depreciation board."""
        asset = super(AccountAssetAsset, self.with_context(mail_create_nolog=True)).create(vals)
        asset.compute_depreciation_board()
        return asset

    @api.multi
    def write(self, vals):
        """Recompute the board after any write that doesn't already set it
        (avoids infinite recursion via compute_depreciation_board's write)."""
        res = super(AccountAssetAsset, self).write(vals)
        if 'depreciation_line_ids' not in vals:
            self.compute_depreciation_board()
        return res

    @api.multi
    def open_entries(self):
        """Window action showing the journal entries of this asset."""
        return {
            'name': _('Journal Entries'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.move',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'context': dict(self.env.context or {}, search_default_asset_id=self.id, default_asset_id=self.id),
        }
class AccountAssetDepreciationLine(models.Model):
    """One line of an asset's depreciation board; posting it creates the
    matching journal entry."""
    _name = 'account.asset.depreciation.line'
    _description = 'Asset depreciation line'

    name = fields.Char(string='Depreciation Name', required=True, index=True)
    sequence = fields.Integer(required=True)
    asset_id = fields.Many2one('account.asset.asset', string='Asset', required=True, ondelete='cascade')
    parent_state = fields.Selection(related='asset_id.state', string='State of Asset')
    amount = fields.Float(string='Current Depreciation', digits=0, required=True)
    remaining_value = fields.Float(string='Next Period Depreciation', digits=0, required=True)
    depreciated_value = fields.Float(string='Cumulative Depreciation', required=True)
    depreciation_date = fields.Date('Depreciation Date', index=True)
    move_id = fields.Many2one('account.move', string='Depreciation Entry')
    move_check = fields.Boolean(compute='_get_move_check', string='Posted', track_visibility='always', store=True)

    @api.one
    @api.depends('move_id')
    def _get_move_check(self):
        """A line counts as posted once it carries a journal entry."""
        self.move_check = bool(self.move_id)

    @api.multi
    def create_move(self, post_move=True):
        """Create one balanced journal entry per line and link it back.

        Returns the list of created move ids; posts them unless
        *post_move* is False (e.g. disposal moves awaiting validation).
        """
        created_moves = self.env['account.move']
        for line in self:
            depreciation_date = self.env.context.get('depreciation_date') or line.depreciation_date or fields.Date.context_today(self)
            company_currency = line.asset_id.company_id.currency_id
            current_currency = line.asset_id.currency_id
            amount = company_currency.compute(line.amount, current_currency)
            # NOTE(review): due to or/and precedence this reads as
            # purchase -> True, sale -> 1, anything else -> -1; True behaves
            # as 1 in the arithmetic below, so the net effect is +1/-1.
            sign = (line.asset_id.category_id.journal_id.type == 'purchase' or line.asset_id.category_id.journal_id.type == 'sale' and 1) or -1
            asset_name = line.asset_id.name + ' (%s/%s)' % (line.sequence, line.asset_id.method_number)
            reference = line.asset_id.code
            journal_id = line.asset_id.category_id.journal_id.id
            partner_id = line.asset_id.partner_id.id
            categ_type = line.asset_id.category_id.type
            debit_account = line.asset_id.category_id.account_asset_id.id
            credit_account = line.asset_id.category_id.account_depreciation_id.id
            # Credit leg: depreciation account.
            move_line_1 = {
                'name': asset_name,
                'account_id': credit_account,
                'debit': 0.0,
                'credit': amount,
                'journal_id': journal_id,
                'partner_id': partner_id,
                'currency_id': company_currency != current_currency and current_currency or False,
                'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0,
                'analytic_account_id': line.asset_id.category_id.account_analytic_id.id if categ_type == 'sale' else False,
                'date': depreciation_date,
            }
            # Debit leg: asset account.
            move_line_2 = {
                'name': asset_name,
                'account_id': debit_account,
                'credit': 0.0,
                'debit': amount,
                'journal_id': journal_id,
                'partner_id': partner_id,
                'currency_id': company_currency != current_currency and current_currency or False,
                'amount_currency': company_currency != current_currency and sign * line.amount or 0.0,
                'analytic_account_id': line.asset_id.category_id.account_analytic_id.id if categ_type == 'purchase' else False,
                'date': depreciation_date,
            }
            move_vals = {
                'ref': reference,
                'date': depreciation_date or False,
                'journal_id': line.asset_id.category_id.journal_id.id,
                'line_ids': [(0, 0, move_line_1), (0, 0, move_line_2)],
                'asset_id': line.asset_id.id,
            }
            move = self.env['account.move'].create(move_vals)
            line.write({'move_id': move.id, 'move_check': True})
            created_moves |= move
        if post_move and created_moves:
            created_moves.post()
        return [x.id for x in created_moves]

    @api.multi
    def post_lines_and_close_asset(self):
        # we re-evaluate the assets to determine whether we can close them
        for line in self:
            line.log_message_when_posted()
            asset = line.asset_id
            if asset.currency_id.is_zero(asset.value_residual):
                asset.message_post(body=_("Document closed."))
                asset.write({'state': 'close'})

    @api.multi
    def log_message_when_posted(self):
        """Log a chatter note on the asset when a line's move gets posted."""
        def _format_message(message_description, tracked_values):
            # Render description + key/value pairs as a small HTML snippet.
            message = ''
            if message_description:
                message = '<span>%s</span>' % message_description
            for name, values in tracked_values.iteritems():
                message += '<div> &bull; <b>%s</b>: ' % name
                message += '%s</div>' % values
            return message

        for line in self:
            if line.move_id and line.move_id.state == 'draft':
                partner_name = line.asset_id.partner_id.name
                currency_name = line.asset_id.currency_id.name
                msg_values = {_('Currency'): currency_name, _('Amount'): line.amount}
                if partner_name:
                    msg_values[_('Partner')] = partner_name
                msg = _format_message(_('Depreciation line posted.'), msg_values)
                line.asset_id.message_post(body=msg)

    @api.multi
    def unlink(self):
        """Refuse deletion of lines whose journal entry was already created."""
        for record in self:
            if record.move_check:
                if record.asset_id.category_id.type == 'purchase':
                    msg = _("You cannot delete posted depreciation lines.")
                else:
                    msg = _("You cannot delete posted installment lines.")
                raise UserError(msg)
        return super(AccountAssetDepreciationLine, self).unlink()
class AccountMove(models.Model):
    """account.move extension linking journal entries to their asset."""
    _inherit = 'account.move'

    asset_id = fields.Many2one('account.asset.asset', string='Asset', ondelete="restrict")

    @api.multi
    def post(self):
        """On posting, log/close the related asset's depreciation lines
        before running the standard posting logic."""
        for move in self:
            if move.asset_id:
                move.asset_id.depreciation_line_ids.post_lines_and_close_asset()
        return super(AccountMove, self).post()
| agpl-3.0 |
XiaodunServerGroup/xiaodun-platform | common/test/acceptance/pages/lms/rubric.py | 6 | 4302 | """
Rubric for open-ended response problems, including calibration and peer-grading.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, fulfill_after, fulfill_before
class ScoreMismatchError(Exception):
    """Raised when the given scores cannot be mapped onto the page's rubric."""
class RubricPage(PageObject):
    """
    Rubric for open-ended response problems, including calibration and peer-grading.
    """

    url = None

    def is_browser_on_page(self):
        """
        Return a boolean indicating whether the rubric is available.
        """
        return self.is_css_present('div.rubric')

    @property
    def categories(self):
        """
        Return a list of categories available in the essay rubric.

        Example:
            ["Writing Applications", "Language Conventions"]

        The rubric is not always visible; if it's not available,
        this will return an empty list.
        """
        return self.css_text('span.rubric-category')

    def set_scores(self, scores):
        """
        Set the rubric scores. `scores` is a list of integers
        indicating the number of points in each category.

        For example, `scores` might be [0, 2, 1] if the student scored
        0 points in the first category, 2 points in the second category,
        and 1 point in the third category.

        If the number of scores does not match the number of categories,
        a `ScoreMismatchError` is raised.
        """
        # Warn if we have the wrong number of scores
        categories = self.categories
        if len(scores) != len(categories):
            # BUGFIX: report the *count* of categories; the original
            # interpolated the category list itself into the message.
            raise ScoreMismatchError(
                "Received {0} scores but there are {1} rubric categories".format(
                    len(scores), len(categories)))

        # Set the score for each category
        for score_index in range(len(scores)):
            category_css = "div.rubric>ul.rubric-list:nth-of-type({0})".format(score_index + 1)
            # Check that we have enough radio buttons for the requested score
            num_options = self.css_count(category_css + ' input.score-selection')
            if scores[score_index] > num_options:
                # BUGFIX: report the offending score and the actual number of
                # options (the original printed the category index and the
                # number of scores supplied).
                raise ScoreMismatchError(
                    "Tried to select score {0} but there are only {1} options".format(
                        scores[score_index], num_options))

            # Check the radio button at the correct index
            else:
                input_css = (
                    category_css +
                    ">li.rubric-list-item:nth-of-type({0}) input.score-selection".format(scores[score_index] + 1)
                )
                self.css_check(input_css)

    @property
    def feedback(self):
        """
        Return a list of correct/incorrect feedback for each rubric category (e.g. from self-assessment).

        Example: ['correct', 'incorrect']

        If no feedback is available, returns an empty list.
        If feedback could not be interpreted (unexpected CSS class),
        the list will contain a `None` item.
        """
        # Get the green checkmark / red x labels
        # We need to filter out the similar-looking CSS classes
        # for the rubric items that are NOT marked correct/incorrect
        feedback_css = 'div.rubric-label>label'
        labels = [
            el_class for el_class in
            self.css_map(feedback_css, lambda el: el['class'])
            if el_class != 'rubric-elements-info'
        ]

        def map_feedback(css_class):
            """
            Map CSS classes on the labels to correct/incorrect
            """
            if 'choicegroup_incorrect' in css_class:
                return 'incorrect'
            elif 'choicegroup_correct' in css_class:
                return 'correct'
            else:
                return None

        return map(map_feedback, labels)

    def submit(self):
        """
        Submit the rubric.
        """
        # Wait for the button to become enabled
        button_css = 'input.submit-button'
        button_enabled = EmptyPromise(
            lambda: all(self.css_map(button_css, lambda el: not el['disabled'])),
            "Submit button enabled"
        )

        # Submit the assessment
        with fulfill_before(button_enabled):
            self.css_click(button_css)
| agpl-3.0 |
qxsch/QXSConsolas | examples/CopyThat/copyThat/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Minimum number of analysed characters before got_enough_data() is true.
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
# Fewer "frequent" characters than this is treated as no data at all.
MINIMUM_DATA_THRESHOLD = 3


class CharDistributionAnalysis:
    """Score how well a byte stream matches a language's typical 2-byte
    character frequency distribution.

    Subclasses install a language-specific frequency table and implement
    get_order() to map a raw 2-byte sequence to an index into that table.
    """

    def __init__(self):
        # Mapping table to get frequency order from char order (see
        # get_order()); installed by subclasses along with its size and the
        # language's typical distribution ratio used in get_confidence().
        # See http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        self._mCharToFreqOrder = None
        self._mTableSize = None
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        self._mDone = False       # True once detection reached a conclusion
        self._mTotalChars = 0     # total characters encountered
        self._mFreqChars = 0      # characters whose frequency order is < 512

    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        # Only 2-byte characters take part in the distribution analysis.
        order = self.get_order(aBuf) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # Without characters in our consideration range, stay negative.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            ratio = self._mFreqChars / (
                (self._mTotalChars - self._mFreqChars)
                * self._mTypicalDistributionRatio)
            if ratio < SURE_YES:
                return ratio
        # Normalise the confidence: never claim 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        # A conclusion can be drawn well before all data has been seen.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # Subclasses convert the encoded bytes to a numeric "order" so that
        # multiple encodings of one language can share a frequency table.
        # The base implementation opts out of the analysis entirely.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis specialised for the EUC-TW encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Lead bytes of interest: 0xC4-0xFE; trail bytes: 0xA1-0xFE.
        # No validation needed here -- the state machine has done that.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis specialised for the EUC-KR encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Lead bytes of interest: 0xB0-0xFE; trail bytes: 0xA1-0xFE.
        # No validation needed here -- the state machine has done that.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis specialised for the GB2312 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Lead bytes of interest: 0xB0-0xFE; trail bytes: 0xA1-0xFE.
        # No validation needed here -- the state machine has done that.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xB0 or trail < 0xA1:
            return -1
        return 94 * (lead - 0xB0) + trail - 0xA1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis specialised for the Big5 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Lead bytes of interest: 0xA4-0xFE; trail bytes fall in the two
        # ranges 0x40-0x7E and 0xA1-0xFE.
        # No validation needed here -- the state machine has done that.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        if trail >= 0xA1:
            return 157 * (lead - 0xA4) + trail - 0xA1 + 63
        return 157 * (lead - 0xA4) + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis specialised for the Shift_JIS encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Lead bytes of interest: 0x81-0x9F and 0xE0-0xEF; trail bytes
        # 0x40-0x7E (higher trail bytes are rejected below).
        # No validation needed here -- the state machine has done that.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            row = lead - 0x81
        elif 0xE0 <= lead <= 0xEF:
            row = lead - 0xE0 + 31
        else:
            return -1
        # Trail bytes above 0x7F do not map into the frequency table.
        if trail > 0x7F:
            return -1
        return 188 * row + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis specialised for the EUC-JP encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Lead bytes of interest: 0xA0-0xFE; trail bytes: 0xA1-0xFE.
        # No validation needed here -- the state machine has done that.
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        # NOTE: the guard accepts 0xA0 while the offset uses 0xA1, matching
        # the original logic (a 0xA0 lead byte produces a negative order,
        # which feed() discards).
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
| gpl-3.0 |
ric2b/Vivaldi-browser | chromium/third_party/blink/web_tests/external/wpt/tools/third_party/html5lib/html5lib/tests/test_parser2.py | 30 | 5415 | from __future__ import absolute_import, division, unicode_literals
from six import PY2, text_type, unichr
import io
from . import support # noqa
from html5lib.constants import namespaces, tokenTypes
from html5lib import parse, parseFragment, HTMLParser
# tests that aren't autogenerated from text files
def test_assertDoctypeCloneable():
    # A DOM document containing a doctype node must remain cloneable.
    document = parse('<!DOCTYPE HTML>', treebuilder="dom")
    assert document.cloneNode(True) is not None


def test_line_counter():
    # http://groups.google.com/group/html5lib-discuss/browse_frm/thread/f4f00e4a2f26d5c0
    tree = parse("<pre>\nx\n>\n</pre>")
    assert tree is not None
def test_namespace_html_elements_0_dom():
    # With namespaceHTMLElements enabled, DOM nodes carry the XHTML namespace.
    document = parse("<html></html>", treebuilder="dom", namespaceHTMLElements=True)
    assert document.childNodes[0].namespaceURI == namespaces["html"]


def test_namespace_html_elements_1_dom():
    # With namespaceHTMLElements disabled, DOM nodes carry no namespace.
    document = parse("<html></html>", treebuilder="dom", namespaceHTMLElements=False)
    assert document.childNodes[0].namespaceURI is None


def test_namespace_html_elements_0_etree():
    # Etree spells the namespace inside the tag name itself.
    document = parse("<html></html>", treebuilder="etree", namespaceHTMLElements=True)
    assert document.tag == "{%s}html" % (namespaces["html"],)


def test_namespace_html_elements_1_etree():
    document = parse("<html></html>", treebuilder="etree", namespaceHTMLElements=False)
    assert document.tag == "html"
def test_unicode_file():
    # Parsing straight from a text stream must work.
    assert parse(io.StringIO("a")) is not None


def test_maintain_attribute_order():
    # This is here because we impl it in parser and not tokenizer
    parser = HTMLParser()
    # Use many attributes to maximize the chance a hash-based mutation
    # would reveal itself as a reordering.
    attrs = [(unichr(code), idx) for idx, code in enumerate(range(ord('a'), ord('z')))]
    token = {
        'name': 'html',
        'selfClosing': False,
        'selfClosingAcknowledged': False,
        'type': tokenTypes["StartTag"],
        'data': attrs,
    }
    normalized = parser.normalizeToken(token)
    assert list(normalized["data"].keys()) == [ch for ch, _ in attrs]
def test_duplicate_attribute():
    # This is here because we impl it in parser and not tokenizer:
    # the FIRST occurrence of a duplicated attribute wins.
    document = parse('<p class=a class=b>')
    paragraph = document[1][0]
    assert paragraph.get("class") == "a"


def test_maintain_duplicate_attribute_order():
    # This is here because we impl it in parser and not tokenizer:
    # dropping a duplicate must not disturb the order of the others.
    parser = HTMLParser()
    attrs = [(unichr(code), idx) for idx, code in enumerate(range(ord('a'), ord('z')))]
    token = {
        'name': 'html',
        'selfClosing': False,
        'selfClosingAcknowledged': False,
        'type': tokenTypes["StartTag"],
        'data': attrs + [('a', len(attrs))],
    }
    normalized = parser.normalizeToken(token)
    assert list(normalized["data"].keys()) == [ch for ch, _ in attrs]
def test_debug_log():
    # Parse with debug=True and compare the recorded tuples of
    # (tokenizer state, previous phase, current phase, handler, token)
    # against a golden list.
    parser = HTMLParser(debug=True)
    parser.parse("<!doctype html><title>a</title><p>b<script>c</script>d</p>e")
    expected = [('dataState', 'InitialPhase', 'InitialPhase', 'processDoctype', {'type': 'Doctype'}),
                ('dataState', 'BeforeHtmlPhase', 'BeforeHtmlPhase', 'processStartTag', {'name': 'title', 'type': 'StartTag'}),
                ('dataState', 'BeforeHeadPhase', 'BeforeHeadPhase', 'processStartTag', {'name': 'title', 'type': 'StartTag'}),
                ('dataState', 'InHeadPhase', 'InHeadPhase', 'processStartTag', {'name': 'title', 'type': 'StartTag'}),
                ('rcdataState', 'TextPhase', 'TextPhase', 'processCharacters', {'type': 'Characters'}),
                ('dataState', 'TextPhase', 'TextPhase', 'processEndTag', {'name': 'title', 'type': 'EndTag'}),
                ('dataState', 'InHeadPhase', 'InHeadPhase', 'processStartTag', {'name': 'p', 'type': 'StartTag'}),
                ('dataState', 'AfterHeadPhase', 'AfterHeadPhase', 'processStartTag', {'name': 'p', 'type': 'StartTag'}),
                ('dataState', 'InBodyPhase', 'InBodyPhase', 'processStartTag', {'name': 'p', 'type': 'StartTag'}),
                ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'}),
                ('dataState', 'InBodyPhase', 'InBodyPhase', 'processStartTag', {'name': 'script', 'type': 'StartTag'}),
                ('dataState', 'InBodyPhase', 'InHeadPhase', 'processStartTag', {'name': 'script', 'type': 'StartTag'}),
                ('scriptDataState', 'TextPhase', 'TextPhase', 'processCharacters', {'type': 'Characters'}),
                ('dataState', 'TextPhase', 'TextPhase', 'processEndTag', {'name': 'script', 'type': 'EndTag'}),
                ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'}),
                ('dataState', 'InBodyPhase', 'InBodyPhase', 'processEndTag', {'name': 'p', 'type': 'EndTag'}),
                ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'})]
    # On Python 2 the recorded log holds byte strings, so encode the golden
    # data to match before comparing.
    if PY2:
        for i, log in enumerate(expected):
            log = [x.encode("ascii") if isinstance(x, text_type) else x for x in log]
            expected[i] = tuple(log)
    assert parser.log == expected
def test_no_duplicate_clone():
    # Adoption-agency reparenting must not duplicate the formatting elements.
    fragment = parseFragment("<b><em><foo><foob><fooc><aside></b></em>")
    assert len(fragment) == 2


def test_self_closing_col():
    # A self-closing <col /> inside a colgroup is valid and error-free.
    parser = HTMLParser()
    parser.parseFragment('<table><colgroup><col /></colgroup></table>')
    assert not parser.errors
| bsd-3-clause |
Omegaphora/external_chromium_org | tools/valgrind/chrome_tests.py | 32 | 31784 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception):
  """Raised when the requested test name is not in _test_list."""


class MultipleGTestFiltersSpecified(Exception):
  """Raised when a gtest filter arrives both via --gtest_filter and --test."""


class BuildDirNotFound(Exception):
  """Raised when no build directory could be located."""


class BuildDirAmbiguous(Exception):
  """Raised when more than one candidate default build directory exists."""
class ChromeTests:
  # Tools slow enough that the shared ".gtest.txt" exclude files are applied
  # in addition to the tool-specific ".gtest-TOOL.txt" ones
  # (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  # Default number of layout tests run per chunk (see TestLayout).
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/valgrind/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build-dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build-dir")
  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.

    Builds the valgrind_test.py invocation: preamble, suppression files for
    the tool (generic plus per-platform), tool flags, optional extra
    valgrind_test.py args, and finally the test executable with its common
    gtest flags.

    Args:
      tool: valgrind_test tool object; its name selects suppression files.
      exe: optional test binary name, resolved inside the build dir
          (".exe" appended on Windows).
      valgrind_test_args: optional extra flags for valgrind_test.py.

    Returns:
      The assembled command list.
    '''
    if exe and common.IsWindows():
      exe += '.exe'
    cmd = list(self._command_preamble)
    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName();
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)
    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # Built-in test launcher for gtest-based executables runs tests using
      # multiple process by default. Force the single-process mode back.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.brave_new_test_launcher:
      cmd.append("--brave-new-test-launcher")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
'''Append an appropriate --gtest_filter flag to the googletest binary
invocation.
If the user passed his own filter mentioning only one test, just use it.
Othewise, filter out tests listed in the appropriate gtest_exclude files.
'''
if (self._gtest_filter and
":" not in self._gtest_filter and
"?" not in self._gtest_filter and
"*" not in self._gtest_filter):
cmd.append("--gtest_filter=%s" % self._gtest_filter)
return
filters = []
gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
gtest_filter_files = [
os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
# Use ".gtest.txt" files only for slow tools, as they now contain
# Valgrind- and Dr.Memory-specific filters.
# TODO(glider): rename the files to ".gtest_slow.txt"
if tool.ToolName() in ChromeTests.SLOW_TOOLS:
gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
for platform_suffix in common.PlatformNames():
gtest_filter_files += [
os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
(tool.ToolName(), platform_suffix))]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace("\\", "/") # '\' on Windows
readable_filename = readable_filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
test_prefixes = ["FLAKY", "FAILS"]
for p in test_prefixes:
# Strip prefixes from the test names.
line = line.replace(".%s_" % p, ".")
# Exclude the original test name.
filters.append(line)
if line[-2:] != ".*":
# List all possible prefixes if line doesn't end with ".*".
for p in test_prefixes:
filters.append(line.replace(".", ".%s_" % p))
# Get rid of duplicates.
filters = set(filters)
gtest_filter = self._gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
  @staticmethod
  def ShowTests():
    """Print every runnable test name, grouping aliases with the shortest one."""
    # Invert _test_list so each handler maps to all names selecting it.
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)
    # Treat the shortest name as canonical; the rest become aliases.
    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]
    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print " {} (aka {})".format(name, ', '.join(aliases))
      else:
        print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
  # --- Thin wrappers: one method per gtest binary, selected by name via
  # --- _test_list.  Each names the module (for suppressions/excludes) and
  # --- the binary to run.
  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")
  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")
  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")
  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")
  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")
  def TestAshShell(self):
    return self.SimpleTest("ash_shelf", "ash_shell_unittests")
  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")
  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")
  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")
  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")
  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")
  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")
  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")
  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")
  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")
  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")
  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")
  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")
  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")
  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")
  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")
  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")
  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")
  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")
  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")
  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")
  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")
  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")
  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")
  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")
  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")
  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")
  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")
  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")
  def TestIpc(self):
    # IPC tests spawn child processes, so the tool must follow them.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])
  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")
  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")
  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")
  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")
  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")
  def TestMojoAppsJS(self):
    return self.SimpleTest("mojo_apps_js", "mojo_apps_js_unittests")
  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")
  def TestMojoJS(self):
    return self.SimpleTest("mojo_js", "mojo_js_unittests")
  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")
  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")
  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")
  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")
  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")
  def TestMojoApplicationManager(self):
    return self.SimpleTest("mojo_application_manager",
                           "mojo_application_manager_unittests")
  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")
  def TestMojoViewManager(self):
    return self.SimpleTest("mojo_view_manager", "mojo_view_manager_unittests")
  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")
  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")
  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")
  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")
  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")
  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])
  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")
  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")
  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")
  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0;
    return self.SimpleTest("chrome", "unit_tests")
  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")
  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")
  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
  # --- Shared flag sets for UI/browser tests run under valgrind ---
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]
  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]
  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)
  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)
  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)
  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Run one slice of the layout tests under the configured tool.

    Returns the tool's exit status.
    """
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests. Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    # python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkits_tests.py commandline
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    # Reuse the chunk directory, clearing any results from a previous run.
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes. Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures", # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py. We aren't passed it directly,
    # so parse it out of build_dir. run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)
    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
def TestLayout(self):
  """Run one rotating slice ("chunk") of the layout tests under valgrind.

  A "chunk file" is maintained in the local directory so that each test
  runs a slice of the layout tests of size chunk_size that increments with
  each run. Since tests can be added and removed from the layout tests at
  any time, this is not going to give exact coverage, but it will allow us
  to continuously run small slices of the layout tests under valgrind rather
  than having to run all of them in one shot.

  Returns the exit code of the underlying TestLayoutChunk invocation.
  """
  chunk_size = self._options.num_tests
  # chunk_size == 0 means "run everything"; explicit test args on the
  # command line also disable chunking.
  if chunk_size == 0 or len(self._args):
    return self.TestLayoutChunk(0, 0)
  chunk_num = 0
  chunk_file = "valgrind_layout_chunk.txt"
  logging.info("Reading state from " + chunk_file)
  try:
    # 'with' guarantees the handle is closed even if int() below raises;
    # the old code leaked the handle on a malformed chunk file.
    with open(chunk_file) as f:
      chunk_str = f.read()
      if len(chunk_str):
        chunk_num = int(chunk_str)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # that we almost guaranteed won't be at the end of the test list)
        if chunk_num > 10000:
          chunk_num = 0
  except IOError as e:
    # A missing/unreadable chunk file is non-fatal: start from chunk 0.
    logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                                                           e.errno,
                                                           e.strerror))
  # Save the new chunk size before running the tests. Otherwise if a
  # particular chunk hangs the bot, the chunk number will never get
  # incremented and the bot will be wedged.
  logging.info("Saving state to " + chunk_file)
  try:
    with open(chunk_file, "w") as f:
      # Increment only after the file opened, matching the original
      # behavior: a write failure leaves chunk_num unchanged.
      chunk_num += 1
      f.write("%d" % chunk_num)
  except IOError as e:
    logging.error("error writing to file %s (%d, %s)" % (chunk_file, e.errno,
                                                         e.strerror))
  # Since we're running small chunks of the layout tests, it's important to
  # mark the ones that have errors in them. These won't be visible in the
  # summary list for long, but will be useful for someone reviewing this bot.
  return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
# Maps a user-supplied test name (short alias or gtest binary name) to the
# method that runs that suite; _main() looks names up here via ChromeTests.
_test_list = {
    "cmdline" : RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle, "angle_unittests": TestAngle,
    "app_list": TestAppList, "app_list_unittests": TestAppList,
    "ash": TestAsh, "ash_unittests": TestAsh,
    "ash_shell": TestAshShell, "ash_shell_unittests": TestAshShell,
    "aura": TestAura, "aura_unittests": TestAura,
    "base": TestBase, "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser, "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast, "cast_unittests": TestCast,
    "cc": TestCC, "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
    "cloud_print": TestCloudPrint,
    "cloud_print_unittests": TestCloudPrint,
    "components": TestComponents,"components_unittests": TestComponents,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent, "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette, "courgette_unittests": TestCourgette,
    "crypto": TestCrypto, "crypto_unittests": TestCrypto,
    "device": TestDevice, "device_unittests": TestDevice,
    "display": TestDisplay, "display_unittests": TestDisplay,
    "events": TestEvents, "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM, "gcm_unit_tests": TestGCM,
    "gin": TestGin, "gin_unittests": TestGin,
    "gfx": TestGfx, "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU, "gpu_unittests": TestGPU,
    "ipc": TestIpc, "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle, "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
    "layout": TestLayout, "layout_tests": TestLayout,
    "media": TestMedia, "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "mojo_apps_js": TestMojoAppsJS,
    "mojo_common": TestMojoCommon,
    "mojo_js": TestMojoJS,
    "mojo_system": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_utility": TestMojoPublicUtility,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_env": TestMojoPublicEnv,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "mojo_application_manager": TestMojoApplicationManager,
    "mojo_view_manager": TestMojoViewManager,
    "net": TestNet, "net_unittests": TestNet,
    "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting, "printing_unittests": TestPrinting,
    "remoting": TestRemoting, "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql, "sql_unittests": TestSql,
    "sync": TestSync, "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit, "ui_unittests": TestUIUnit,
    "unit": TestUnit, "unit_tests": TestUnit,
    "url": TestURL, "url_unittests": TestURL,
    "views": TestViews, "views_unittests": TestViews,
    # "webkit" is a legacy alias for the layout tests.
    "webkit": TestLayout,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
# TODO(thestig) Remove this if we can.
parser.add_option("--gtest_color", dest="gtest_color", default="no",
help="dummy compatibility flag for sharding_supervisor.")
parser.add_option("--brave-new-test-launcher", action="store_true",
help="run the tests with --brave-new-test-launcher")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
# Script entry point: propagate _main()'s return code to the shell.
if __name__ == "__main__":
  sys.exit(_main())
| bsd-3-clause |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/six.py | 271 | 23462 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.6.1"


# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# Portable aliases for the types whose names changed between Python 2 and 3.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe: len() must fit in Py_ssize_t, so asking for 1 << 31 raises
        # OverflowError exactly on 32-bit builds.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
    """Attach *doc* as the docstring of *func*."""
    setattr(func, "__doc__", doc)
def _import_module(name):
    """Import the (possibly dotted) module *name* and return it.

    A bare ``__import__`` returns the top-level package for dotted names,
    so the innermost module is fetched from ``sys.modules`` instead.
    """
    __import__(name)
    imported = sys.modules[name]
    return imported
class _LazyDescr(object):

    """Descriptor that resolves its target lazily, on first attribute access.

    Subclasses supply ``_resolve()``, which performs the actual import and
    returns the real object.
    """

    def __init__(self, name):
        # Attribute name this descriptor is installed under on the owner.
        self.name = name

    def __get__(self, obj, tp):
        try:
            result = self._resolve()
        except ImportError:
            # See the nice big comment in MovedModule.__getattr__.
            raise AttributeError("%s could not be imported " % self.name)
        # Cache the resolved object on the instance so the descriptor is
        # bypassed from now on.
        setattr(obj, self.name, result) # Invokes __set__.
        # This is a bit ugly, but it avoids running this again.
        delattr(obj.__class__, self.name)
        return result
class MovedModule(_LazyDescr):

    """Lazy descriptor for a module that was renamed between Python 2 and 3.

    *old* is the Python 2 module name; *new* is the Python 3 name and
    defaults to *name* when omitted.
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        # Import and return the version-appropriate module.
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # It turns out many Python frameworks like to traverse sys.modules and
        # try to load various attributes. This causes problems if this is a
        # platform-specific module on the wrong platform, like _winreg on
        # Unixes. Therefore, we silently pretend unimportable modules do not
        # have any attributes. See issues #51, #53, #56, and #63 for the full
        # tales of woe.
        #
        # First, if possible, avoid loading the module just to look at __file__,
        # __name__, or __path__.
        if (attr in ("__file__", "__name__", "__path__") and
            self.mod not in sys.modules):
            raise AttributeError(attr)
        try:
            _module = self._resolve()
        except ImportError:
            raise AttributeError(attr)
        value = getattr(_module, attr)
        # Cache so later lookups skip __getattr__ entirely.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):

    """Module subclass whose registered attributes are resolved lazily."""

    # Subclasses override this with their MovedAttribute/MovedModule entries.
    _moved_attributes = []

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        """List the dunder basics plus every lazily-provided name."""
        return ["__doc__", "__name__"] + [
            entry.name for entry in self._moved_attributes]
class MovedAttribute(_LazyDescr):

    """Lazy descriptor for an attribute that moved between modules.

    The Python 3 module/attribute names default to the Python 2 ones (and
    ultimately to *name*) when not given explicitly.
    """

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # new_attr falls back to old_attr, which falls back to name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        # Import the owning module, then pull the attribute off it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
sys.modules[__name__ + ".moves." + attr.name] = attr
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
parse = sys.modules[__name__ + ".moves.urllib_parse"]
error = sys.modules[__name__ + ".moves.urllib_error"]
request = sys.modules[__name__ + ".moves.urllib_request"]
response = sys.modules[__name__ + ".moves.urllib_response"]
robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # Installing on the class makes the entry a lazy descriptor like the
    # built-in moves.
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves."""
    # NOTE: EAFP is load-bearing here — probing the class with getattr or
    # hasattr would trigger the descriptor protocol on the lazy entries.
    try:
        # Unresolved entries are descriptors stored on the class.
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            # Already-resolved entries were cached in the instance dict.
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    # _iterkeys holds the version-specific method name ("keys" on Python 3,
    # "iterkeys" on Python 2), chosen in the PY3 branch above.
    return iter(getattr(d, _iterkeys)(**kw))


def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    return iter(getattr(d, _itervalues)(**kw))


def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return iter(getattr(d, _iteritems)(**kw))


def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    # Plain dicts have no lists()/iterlists() method; presumably intended
    # for multi-dict style mappings that do.
    return iter(getattr(d, _iterlists)(**kw))
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class named ``NewBase`` built by *meta*; deriving
    from it gives subclasses the metaclass *meta* on Python 2 and 3 alike.
    """
    namespace = {}
    return meta("NewBase", bases, namespace)
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class via *metaclass*.

    The class body is copied into a fresh class constructed by *metaclass*,
    so the decorated class behaves as if it had been declared with that
    metaclass on both Python 2 and Python 3.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        # These are created automatically for every class; carrying them
        # into the rebuilt class would be wrong.
        for auto_attr in ('__dict__', '__weakref__'):
            body.pop(auto_attr, None)
        slots = body.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            # Slot descriptors live in the class dict; the new class will
            # regenerate them from __slots__.
            for slot_name in slot_names:
                body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
| lgpl-3.0 |
Julius2342/pyvlx | pyvlx/api/get_state.py | 1 | 1220 | """Module for retrieving gateway state from API."""
from pyvlx.dataobjects import DtoState
from .api_event import ApiEvent
from .frames import FrameGetStateConfirmation, FrameGetStateRequest
class GetState(ApiEvent):
    """API event that fetches the gateway state from the KLF 200."""

    def __init__(self, pyvlx):
        """Initialize GetState class."""
        super().__init__(pyvlx=pyvlx)
        # Set by handle_frame() once the confirmation arrives.
        self.success = False
        self.state = DtoState()

    async def handle_frame(self, frame):
        """Handle incoming API frame, return True if this was the expected frame."""
        if isinstance(frame, FrameGetStateConfirmation):
            self.success = True
            self.state = DtoState(frame.gateway_state, frame.gateway_sub_state)
            return True
        return False

    def request_frame(self):
        """Construct initiating frame."""
        return FrameGetStateRequest()

    @property
    def gateway_state(self):
        """Return Gateway State as human readable string. Deprecated."""
        return self.state.gateway_state

    @property
    def gateway_sub_state(self):
        """Return Gateway Sub State as human readable string. Deprecated."""
        return self.state.gateway_sub_state
| lgpl-3.0 |
tensor-tang/Paddle | python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py | 2 | 3977 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from parallel_executor_test_base import TestParallelExecutorBase
import seresnext_net
import paddle.fluid.core as core
class TestResnetWithReduceBase(TestParallelExecutorBase):
    """Checks that SE-ResNeXt training produces (almost) identical losses
    under the AllReduce and Reduce gradient-aggregation strategies, and,
    on CUDA, also under sequential execution.
    """

    def _assert_losses_close(self, first_a, last_a, first_b, last_b, delta2):
        """Assert two runs converged to the same losses.

        First-iteration losses must agree tightly (1e-5); last-iteration
        losses use the caller-supplied looser ``delta2`` since numeric
        differences accumulate over iterations.

        NOTE: uses ``assertAlmostEqual`` — ``assertAlmostEquals`` is a
        deprecated alias.
        """
        for loss in zip(first_a, first_b):
            self.assertAlmostEqual(loss[0], loss[1], delta=1e-5)
        for loss in zip(last_a, last_b):
            self.assertAlmostEqual(loss[0], loss[1], delta=delta2)

    def _compare_reduce_and_allreduce(self, use_cuda, delta2=1e-5):
        """Train the model with use_reduce=False/True (and, on CUDA, again
        with sequential execution) and assert the losses agree.
        """
        if use_cuda and not core.is_compiled_with_cuda():
            return

        all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_cuda),
            iter=seresnext_net.iter(use_cuda),
            batch_size=seresnext_net.batch_size(),
            use_cuda=use_cuda,
            use_reduce=False,
            optimizer=seresnext_net.optimizer)
        reduce_first_loss, reduce_last_loss = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_cuda),
            iter=seresnext_net.iter(use_cuda),
            batch_size=seresnext_net.batch_size(),
            use_cuda=use_cuda,
            use_reduce=True,
            optimizer=seresnext_net.optimizer)

        self._assert_losses_close(all_reduce_first_loss, all_reduce_last_loss,
                                  reduce_first_loss, reduce_last_loss, delta2)

        if not use_cuda:
            # Sequential-execution comparison is only exercised on CUDA.
            return

        all_reduce_first_loss_seq, all_reduce_last_loss_seq = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_cuda),
            iter=seresnext_net.iter(use_cuda),
            batch_size=seresnext_net.batch_size(),
            use_cuda=use_cuda,
            use_reduce=False,
            optimizer=seresnext_net.optimizer,
            enable_sequential_execution=True)
        reduce_first_loss_seq, reduce_last_loss_seq = self.check_network_convergence(
            seresnext_net.model,
            feed_dict=seresnext_net.feed_dict(use_cuda),
            iter=seresnext_net.iter(use_cuda),
            batch_size=seresnext_net.batch_size(),
            use_cuda=use_cuda,
            use_reduce=True,
            optimizer=seresnext_net.optimizer,
            enable_sequential_execution=True)

        self._assert_losses_close(
            all_reduce_first_loss, all_reduce_last_loss,
            all_reduce_first_loss_seq, all_reduce_last_loss_seq, delta2)
        self._assert_losses_close(
            reduce_first_loss, reduce_last_loss,
            reduce_first_loss_seq, reduce_last_loss_seq, delta2)
        self._assert_losses_close(
            all_reduce_first_loss_seq, all_reduce_last_loss_seq,
            reduce_first_loss_seq, reduce_last_loss_seq, delta2)
class TestResnetWithReduceCPU(TestResnetWithReduceBase):
    # CPU variant: last-iteration losses get a looser 1e-3 tolerance.
    def test_seresnext_with_reduce(self):
        self._compare_reduce_and_allreduce(use_cuda=False, delta2=1e-3)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
danielnelson/aiohttp | aiohttp/web_reqrep.py | 5 | 24862 | import asyncio
import binascii
import cgi
import collections
import datetime
import http.cookies
import io
import json
import math
import time
import warnings
import enum
from email.utils import parsedate
from types import MappingProxyType
from urllib.parse import urlsplit, parse_qsl, unquote
from . import hdrs
from .helpers import reify
from .multidict import (CIMultiDictProxy,
CIMultiDict,
MultiDictProxy,
MultiDict)
from .protocol import Response as ResponseImpl, HttpVersion10, HttpVersion11
from .streams import EOF_MARKER
__all__ = ('ContentCoding', 'Request', 'StreamResponse', 'Response')


# Unique marker used to distinguish "not parsed yet" from a None header value.
sentinel = object()
class HeadersMixin:
    """Mixin exposing parsed accessors for Content-Type/Content-Length.

    Caches the parsed Content-Type so the raw header is only re-parsed
    when its value actually changes.
    """

    _content_type = None
    _content_dict = None
    _stored_content_type = sentinel

    def _parse_content_type(self, raw):
        self._stored_content_type = raw
        if raw is None:
            # default value according to RFC 2616
            self._content_type = 'application/octet-stream'
            self._content_dict = {}
            return
        self._content_type, self._content_dict = cgi.parse_header(raw)

    @property
    def content_type(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of content part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if raw != self._stored_content_type:
            self._parse_content_type(raw)
        return self._content_type

    @property
    def charset(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of charset part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if raw != self._stored_content_type:
            self._parse_content_type(raw)
        return self._content_dict.get('charset')

    @property
    def content_length(self, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        """The value of Content-Length HTTP header."""
        raw = self.headers.get(_CONTENT_LENGTH)
        return None if raw is None else int(raw)
# Container describing one uploaded file parsed from a multipart POST body.
FileField = collections.namedtuple(
    'Field', ['name', 'filename', 'file', 'content_type'])
class ContentCoding(enum.Enum):
    """Content codings natively supported for response compression.

    The full registry of codings is listed at:
    https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding

    NOTE: member order is significant — compression negotiation iterates
    this enum in definition order.
    """

    deflate = 'deflate'
    gzip = 'gzip'
    identity = 'identity'
############################################################
# HTTP Request
############################################################
class Request(dict, HeadersMixin):
    """A single HTTP request received by the server.

    Subclasses ``dict`` so handlers and middlewares can attach arbitrary
    per-request state to the instance.
    """

    # Methods expected to carry a request body; post() returns an empty
    # multidict for any other method.
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
                    hdrs.METH_TRACE, hdrs.METH_DELETE}

    def __init__(self, app, message, payload, transport, reader, writer, *,
                 _HOST=hdrs.HOST, secure_proxy_ssl_header=None):
        # `message` is the parsed request line + headers, `payload` the body
        # stream.  `_HOST` is a keyword-default micro-optimization binding
        # the header constant at function definition time.
        self._app = app
        self._version = message.version
        self._transport = transport
        self._reader = reader
        self._writer = writer
        self._method = message.method
        self._host = message.headers.get(_HOST)
        self._path_qs = message.path
        self._post = None
        self._post_files_cache = None
        self._headers = CIMultiDictProxy(message.headers)
        if self._version < HttpVersion10:
            # HTTP/0.9 has no keep-alive support.
            self._keep_alive = False
        else:
            self._keep_alive = not message.should_close

        # matchdict, route_name, handler
        # or information about traversal lookup
        self._match_info = None  # initialized after route resolving

        self._payload = payload
        self._cookies = None
        self._read_bytes = None
        self._has_body = not payload.at_eof()

        self._secure_proxy_ssl_header = secure_proxy_ssl_header

    @reify
    def scheme(self):
        """A string representing the scheme of the request.

        'http' or 'https'.
        """
        if self._transport.get_extra_info('sslcontext'):
            return 'https'
        secure_proxy_ssl_header = self._secure_proxy_ssl_header
        if secure_proxy_ssl_header is not None:
            # A configured (header, value) pair signals TLS terminated by an
            # upstream proxy.
            header, value = secure_proxy_ssl_header
            if self._headers.get(header) == value:
                return 'https'
        return 'http'

    @property
    def method(self):
        """Read only property for getting HTTP method.

        The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
        """
        return self._method

    @property
    def version(self):
        """Read only property for getting HTTP version of request.

        Returns aiohttp.protocol.HttpVersion instance.
        """
        return self._version

    @property
    def host(self):
        """Read only property for getting *HOST* header of request.

        Returns str or None if HTTP request has no HOST header.
        """
        return self._host

    @property
    def path_qs(self):
        """The URL including PATH_INFO and the query string.

        E.g, /app/blog?id=10
        """
        return self._path_qs

    @reify
    def _splitted_path(self):
        # Build an absolute URL so urlsplit parses path/query consistently.
        url = '{}://{}{}'.format(self.scheme, self.host, self._path_qs)
        return urlsplit(url)

    @property
    def raw_path(self):
        """ The URL including raw *PATH INFO* without the host or scheme.

        Warning, the path is unquoted and may contains non valid URL
        characters

        E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
        """
        return self._splitted_path.path

    @reify
    def path(self):
        """The URL including *PATH INFO* without the host or scheme.

        E.g., ``/app/blog``
        """
        return unquote(self.raw_path)

    @reify
    def query_string(self):
        """The query string in the URL.

        E.g., id=10
        """
        return self._splitted_path.query

    @reify
    def GET(self):
        """A multidict with all the variables in the query string.

        Lazy property.
        """
        return MultiDictProxy(MultiDict(parse_qsl(self.query_string,
                                                  keep_blank_values=True)))

    @reify
    def POST(self):
        """A multidict with all the variables in the POST parameters.

        post() methods has to be called before using this attribute.
        """
        if self._post is None:
            raise RuntimeError("POST is not available before post()")
        return self._post

    @property
    def headers(self):
        """A case-insensitive multidict proxy with all headers."""
        return self._headers

    @property
    def if_modified_since(self, _IF_MODIFIED_SINCE=hdrs.IF_MODIFIED_SINCE):
        """The value of If-Modified-Since HTTP header, or None.

        This header is represented as a `datetime` object.
        """
        httpdate = self.headers.get(_IF_MODIFIED_SINCE)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None

    @property
    def keep_alive(self):
        """Is keepalive enabled by client?"""
        return self._keep_alive

    @property
    def match_info(self):
        """Result of route resolving."""
        return self._match_info

    @property
    def app(self):
        """Application instance."""
        return self._app

    @property
    def transport(self):
        """Transport used for request processing."""
        return self._transport

    @property
    def cookies(self):
        """Return request cookies.

        A read-only dictionary-like object.
        """
        if self._cookies is None:
            raw = self.headers.get(hdrs.COOKIE, '')
            parsed = http.cookies.SimpleCookie(raw)
            # Expose only name -> value, immutable for handlers.
            self._cookies = MappingProxyType(
                {key: val.value for key, val in parsed.items()})
        return self._cookies

    @property
    def payload(self):
        """Return raw payload stream. Deprecated alias of .content."""
        warnings.warn('use Request.content instead', DeprecationWarning)
        return self._payload

    @property
    def content(self):
        """Return raw payload stream."""
        return self._payload

    @property
    def has_body(self):
        """Return True if request has HTTP BODY, False otherwise."""
        return self._has_body

    @asyncio.coroutine
    def release(self):
        """Release request.

        Eat unread part of HTTP BODY if present.
        """
        # NOTE(review): loop also keeps reading when the EOF marker arrives
        # truthy — verify against EOF_MARKER semantics in .streams.
        chunk = yield from self._payload.readany()
        while chunk is not EOF_MARKER or chunk:
            chunk = yield from self._payload.readany()

    @asyncio.coroutine
    def read(self):
        """Read request body if present.

        Returns bytes object with full request content.
        """
        if self._read_bytes is None:
            body = bytearray()
            while True:
                chunk = yield from self._payload.readany()
                body.extend(chunk)
                if chunk is EOF_MARKER:
                    break
            # Cache so repeated read()/text()/json() calls reuse the body.
            self._read_bytes = bytes(body)
        return self._read_bytes

    @asyncio.coroutine
    def text(self):
        """Return BODY as text using encoding from .charset."""
        bytes_body = yield from self.read()
        encoding = self.charset or 'utf-8'
        return bytes_body.decode(encoding)

    @asyncio.coroutine
    def json(self, *, loader=json.loads):
        """Return BODY as JSON."""
        body = yield from self.text()
        return loader(body)

    @asyncio.coroutine
    def post(self):
        """Return POST parameters, parsing the body on first call."""
        if self._post is not None:
            return self._post
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            # Not a form payload: expose an empty immutable multidict.
            self._post = MultiDictProxy(MultiDict())
            return self._post

        body = yield from self.read()
        content_charset = self.charset or 'utf-8'

        # Reuse the cgi module's form parser by faking a CGI environment.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}

        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)

        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }

        out = MultiDict()
        _count = 1
        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                # Cache the raw cgi field object (see "file closed error"
                # note above) — presumably to keep the temp file alive;
                # TODO confirm.
                self._post_files_cache[field.name+str(_count)] = field
                _count += 1
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)

        self._post = MultiDictProxy(out)
        return self._post

    def __repr__(self):
        return "<{} {} {} >".format(self.__class__.__name__,
                                    self.method, self.path)
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
    """Base HTTP response supporting streamed (chunk-by-chunk) output."""

    def __init__(self, *, status=200, reason=None, headers=None):
        self._body = None
        self._keep_alive = None
        self._chunked = False
        self._chunk_size = None
        self._compression = False
        self._compression_force = False
        self._headers = CIMultiDict()
        self._cookies = http.cookies.SimpleCookie()
        self.set_status(status, reason)

        self._req = None
        self._resp_impl = None
        self._eof_sent = False

        if headers is not None:
            self._headers.extend(headers)

    def _copy_cookies(self):
        # Serialize each cookie into its own Set-Cookie header.
        for cookie in self._cookies.values():
            value = cookie.output(header='')[1:]
            self.headers.add(hdrs.SET_COOKIE, value)

    @property
    def prepared(self):
        # True once _start() created the low-level response implementation.
        return self._resp_impl is not None

    @property
    def started(self):
        # Deprecated alias of .prepared.
        warnings.warn('use Response.prepared instead', DeprecationWarning)
        return self.prepared

    @property
    def status(self):
        return self._status

    @property
    def chunked(self):
        return self._chunked

    @property
    def compression(self):
        return self._compression

    @property
    def reason(self):
        return self._reason

    def set_status(self, status, reason=None):
        self._status = int(status)
        if reason is None:
            # Derive the standard reason phrase from the status code.
            reason = ResponseImpl.calc_reason(status)
        self._reason = reason

    @property
    def keep_alive(self):
        return self._keep_alive

    def force_close(self):
        self._keep_alive = False

    def enable_chunked_encoding(self, chunk_size=None):
        """Enables automatic chunked transfer encoding."""
        self._chunked = True
        self._chunk_size = chunk_size

    def enable_compression(self, force=None):
        """Enables response compression encoding."""
        # Backwards compatibility for when force was a bool <0.17.
        if type(force) == bool:
            force = ContentCoding.deflate if force else ContentCoding.identity
        self._compression = True
        self._compression_force = force

    @property
    def headers(self):
        return self._headers

    @property
    def cookies(self):
        return self._cookies

    def set_cookie(self, name, value, *, expires=None,
                   domain=None, max_age=None, path='/',
                   secure=None, httponly=None, version=None):
        """Set or update response cookie.

        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        old = self._cookies.get(name)
        if old is not None and old.coded_value == '':
            # deleted cookie: drop the expired placeholder before re-setting
            self._cookies.pop(name, None)

        self._cookies[name] = value
        c = self._cookies[name]
        if expires is not None:
            c['expires'] = expires
        if domain is not None:
            c['domain'] = domain
        if max_age is not None:
            c['max-age'] = max_age
        elif 'max-age' in c:
            del c['max-age']
        c['path'] = path
        if secure is not None:
            c['secure'] = secure
        if httponly is not None:
            c['httponly'] = httponly
        if version is not None:
            c['version'] = version

    def del_cookie(self, name, *, domain=None, path='/'):
        """Delete cookie.

        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        self._cookies.pop(name, None)
        self.set_cookie(name, '', max_age=0, domain=domain, path=path)

    @property
    def content_length(self):
        # Just a placeholder for adding setter
        return super().content_length

    @content_length.setter
    def content_length(self, value):
        if value is not None:
            value = int(value)
            # TODO: raise error if chunked enabled
            self.headers[hdrs.CONTENT_LENGTH] = str(value)
        else:
            self.headers.pop(hdrs.CONTENT_LENGTH, None)

    @property
    def content_type(self):
        # Just a placeholder for adding setter
        return super().content_type

    @content_type.setter
    def content_type(self, value):
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()

    @property
    def charset(self):
        # Just a placeholder for adding setter
        return super().charset

    @charset.setter
    def charset(self, value):
        ctype = self.content_type  # read header values if needed
        if ctype == 'application/octet-stream':
            raise RuntimeError("Setting charset for application/octet-stream "
                               "doesn't make sense, setup content_type first")
        if value is None:
            self._content_dict.pop('charset', None)
        else:
            self._content_dict['charset'] = str(value).lower()
        self._generate_content_type_header()

    @property
    def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
        """The value of Last-Modified HTTP header, or None.

        This header is represented as a `datetime` object.
        """
        httpdate = self.headers.get(_LAST_MODIFIED)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None

    @last_modified.setter
    def last_modified(self, value):
        # Accepts None (delete), POSIX timestamp, datetime, or raw string.
        if value is None:
            if hdrs.LAST_MODIFIED in self.headers:
                del self.headers[hdrs.LAST_MODIFIED]
        elif isinstance(value, (int, float)):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
        elif isinstance(value, datetime.datetime):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
        elif isinstance(value, str):
            self.headers[hdrs.LAST_MODIFIED] = value

    def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
        # Rebuild the Content-Type header from cached type + params.
        params = '; '.join("%s=%s" % i for i in self._content_dict.items())
        if params:
            ctype = self._content_type + '; ' + params
        else:
            ctype = self._content_type
        self.headers[CONTENT_TYPE] = ctype

    def _start_pre_check(self, request):
        # Guard against preparing the same response for a different request.
        if self._resp_impl is not None:
            if self._req is not request:
                raise RuntimeError(
                    'Response has been started with different request.')
            else:
                return self._resp_impl
        else:
            return None

    def _start_compression(self, request):
        def _start(coding):
            if coding != ContentCoding.identity:
                self.headers[hdrs.CONTENT_ENCODING] = coding.value
                self._resp_impl.add_compression_filter(coding.value)
                # Compressed length is unknown up-front.
                self.content_length = None

        if self._compression_force:
            _start(self._compression_force)
        else:
            # Negotiate: first enum member found in Accept-Encoding wins.
            accept_encoding = request.headers.get(
                hdrs.ACCEPT_ENCODING, '').lower()
            for coding in ContentCoding:
                if coding.value in accept_encoding:
                    _start(coding)
                    return

    def start(self, request):
        # Deprecated synchronous variant of prepare().
        warnings.warn('use .prepare(request) instead', DeprecationWarning)
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl

        return self._start(request)

    @asyncio.coroutine
    def prepare(self, request):
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl

        return self._start(request)

    def _start(self, request):
        self._req = request
        keep_alive = self._keep_alive
        if keep_alive is None:
            # Inherit keep-alive from the request when not set explicitly.
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive

        resp_impl = self._resp_impl = ResponseImpl(
            request._writer,
            self._status,
            request.version,
            not keep_alive,
            self._reason)

        self._copy_cookies()

        if self._compression:
            self._start_compression(request)

        if self._chunked:
            if request.version != HttpVersion11:
                raise RuntimeError("Using chunked encoding is forbidden "
                                   "for HTTP/{0.major}.{0.minor}".format(
                                       request.version))
            resp_impl.enable_chunked_encoding()
            if self._chunk_size:
                resp_impl.add_chunking_filter(self._chunk_size)

        headers = self.headers.items()
        for key, val in headers:
            resp_impl.add_header(key, val)

        resp_impl.send_headers()
        return resp_impl

    def write(self, data):
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            'data argument must be byte-ish (%r)' % type(data)

        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._resp_impl is None:
            raise RuntimeError("Cannot call write() before start()")

        if data:
            return self._resp_impl.write(data)
        else:
            return ()

    @asyncio.coroutine
    def drain(self):
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.transport.drain()

    @asyncio.coroutine
    def write_eof(self):
        # Idempotent: subsequent calls are no-ops.
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")

        yield from self._resp_impl.write_eof()
        self._eof_sent = True

    def __repr__(self):
        if self.started:
            info = "{} {} ".format(self._req.method, self._req.path)
        else:
            info = "not started"
        return "<{} {} {}>".format(self.__class__.__name__,
                                   self.reason, info)
class Response(StreamResponse):
    """Convenience response class holding the complete body in memory."""

    def __init__(self, *, body=None, status=200,
                 reason=None, text=None, headers=None, content_type=None):
        super().__init__(status=status, reason=reason, headers=headers)

        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together.")

        if text is not None:
            if hdrs.CONTENT_TYPE not in self.headers:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError('text argument must be str (%r)' %
                                    type(text))
                if content_type is None:
                    content_type = 'text/plain'
                self.headers[hdrs.CONTENT_TYPE] = (
                    content_type + '; charset=utf-8')
                self._content_type = content_type
                self._content_dict = {'charset': 'utf-8'}
                self.body = text.encode('utf-8')
            else:
                # Content-Type supplied via headers: go through the slower
                # text setter which respects the declared charset.
                self.text = text
        else:
            if content_type:
                self.content_type = content_type
            if body is not None:
                self.body = body
            else:
                self.body = None

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, body):
        if body is not None and not isinstance(body, bytes):
            raise TypeError('body argument must be bytes (%r)' % type(body))
        self._body = body
        # Keep Content-Length in sync with the stored body.
        if body is not None:
            self.content_length = len(body)
        else:
            self.content_length = 0

    @property
    def text(self):
        if self._body is None:
            return None
        return self._body.decode(self.charset or 'utf-8')

    @text.setter
    def text(self, text):
        if text is not None and not isinstance(text, str):
            raise TypeError('text argument must be str (%r)' % type(text))

        if self.content_type == 'application/octet-stream':
            self.content_type = 'text/plain'
        if self.charset is None:
            self.charset = 'utf-8'

        self.body = text.encode(self.charset)

    @asyncio.coroutine
    def write_eof(self):
        # Flush the buffered body right before finishing the response.
        body = self._body
        if body is not None:
            self.write(body)
        yield from super().write_eof()
| apache-2.0 |
hansraj/video-transcoding | utils/generate_tests.py | 2 | 3731 | #!/usr/bin/env python
"""
Arista Test Generator
=====================
Generate a series of test files containing audio/video to run through the
transcoder for unit testing.
License
-------
Copyright 2008 Daniel G. Taylor <dan@programmer-art.org>
This file is part of Arista.
Arista is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Foobar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Arista. If not, see <http://www.gnu.org/licenses/>.
"""
import os
if not os.path.exists("tests"):
os.mkdir("tests")
os.chdir("tests")
print "Generating test samples..."
# Ogg (Theora/Vorbis) tests
os.system("gst-launch-0.10 audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! vorbisenc ! oggmux ! filesink location='test-audio.ogg'")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! theoraenc ! oggmux ! filesink location='test-video.ogg'")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! theoraenc ! queue ! oggmux name=mux ! filesink location='test.ogg' audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! vorbisenc ! queue ! mux.")
# AVI (XVID, MP3), etc.
os.system("gst-launch-0.10 audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! lame ! filesink location='test-audio.mp3'")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! xvidenc ! avimux ! filesink location='test-video.avi'")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! xvidenc ! queue ! avimux name=mux ! filesink location='test.avi' audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! lame ! queue ! mux.")
# MP4 (H.264, AAC), etc
os.system("gst-launch-0.10 audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! faac ! qtmux ! filesink location='test-audio.m4a'")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! x264enc ! qtmux ! filesink location='test-video.mp4'")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! x264enc ! queue ! qtmux name=mux ! filesink location='test.mp4' audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! faac ! queue ! mux.")
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! xvidenc ! queue ! qtmux name=mux ! filesink location='test2.mp4' audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! lame ! queue ! mux.")
# DV
# Why does this fail?
#os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! ffenc_dvvideo ! queue ! ffmux_dv name=mux ! filesink location='test.dv' audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! queue ! mux.")
# ASF (WMV/WMA)
os.system("gst-launch-0.10 videotestsrc num-buffers=500 ! ffmpegcolorspace ! videoscale ! videorate ! ffenc_wmv2 ! queue ! asfmux name=mux ! filesink location='test.wmv' audiotestsrc num-buffers=500 ! audiorate ! audioconvert ! audioresample ! ffenc_wmav2 ! queue ! mux.")
print "Test samples can be found in the tests directory."
| lgpl-2.1 |
MichaelQQ/linux-2.6.35-vpls | tools/perf/scripts/python/check-perf-trace.py | 948 | 2501 | # perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
    # Called by perf once after the last event; dump the summary of events
    # that had no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for the irq:softirq_entry tracepoint; demonstrates symbolic
    # rendering of the 'vec' field via symbol_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for the kmem:kmalloc tracepoint; demonstrates flag-string
    # rendering of 'gfp_flags' via flag_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  'unhandled' is an
    # autodict: a missing key auto-creates a nested dict, so incrementing a
    # fresh entry raises TypeError, which we use to seed the counter.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Print the fixed-width prefix shared by all event handlers; the
    # trailing comma suppresses the newline so handlers can append fields.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These values are fetched from the raw trace context through perf's
    # helper calls rather than being passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a table of event-name -> occurrence count for every event that
    # reached trace_unhandled(); silent when everything was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
Slepice1/simple-web-generator | setup.py | 1 | 9036 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
    def check_output(cmd_args, *args, **kwargs):
        """Backport of :func:`subprocess.check_output` for Python 2.6."""
        proc = subprocess.Popen(
            cmd_args, *args,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
        out, err = proc.communicate()
        if proc.returncode != 0:
            # BUGFIX: CalledProcessError takes (returncode, cmd).  The old
            # code passed the wrapper's extra-args tuple as the only
            # argument, which raised a TypeError instead of the intended
            # error.
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
        return out
    subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
try:
    import colorama
    colorama.init()  # Initialize colorama on Windows (enables ANSI colors)
except ImportError:
    # Don't require colorama just for running paver tasks. This allows us to
    # run `paver install' without requiring the user to first have colorama
    # installed.  The print helpers below fall back to uncolored output.
    pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))

## Constants
CODE_DIRECTORY = 'simple_web_generator'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']

# Import metadata. Normally this would just be:
#
#     from simple_web_generator import metadata
#
# However, when we do this, we also import `simple_web_generator/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
# NOTE(review): `imp` is deprecated on Python 3 (importlib is the modern
# replacement) — kept as-is here for Python 2 compatibility.
metadata = imp.load_source(
    'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
    """Retrieve a list of project files, ignoring hidden files.

    :return: sorted list of project files
    :rtype: :class:`list`
    """
    if is_git_project() and has_git():
        return get_git_project_files()

    project_files = []
    for top, subdirs, files in os.walk('.'):
        # Prune hidden directories *in place* so os.walk skips descending
        # into them.
        # BUGFIX: the old code called subdirs.remove() while iterating over
        # subdirs, which skips the entry following each removed one (e.g.
        # with two adjacent hidden dirs the second was still walked).
        subdirs[:] = [d for d in subdirs if not d.startswith('.')]

        for f in files:
            if f.startswith('.'):
                continue
            project_files.append(os.path.join(top, f))

    return project_files
def is_git_project():
    # True when the current working directory is the root of a git checkout.
    return os.path.isdir('.git')
def has_git():
    # True when a `git` executable can be found on PATH.
    return bool(spawn.find_executable("git"))
def get_git_project_files():
    """Retrieve a list of all non-ignored files, including untracked files,
    excluding deleted files.

    :return: sorted list of git project files
    :rtype: :class:`list`
    """
    cached_and_untracked_files = git_ls_files(
        '--cached',  # All files cached in the index
        '--others',  # Untracked files
        # Exclude untracked files that would be excluded by .gitignore, etc.
        '--exclude-standard')
    uncommitted_deleted_files = git_ls_files('--deleted')

    # Since sorting of files in a set is arbitrary, return a sorted list to
    # provide a well-defined order to tools like flake8, etc.
    return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
    """Run ``git ls-files`` in the top-level project directory. Arguments go
    directly to execution call.

    :return: set of file names
    :rtype: :class:`set`
    """
    cmd = ['git', 'ls-files']
    cmd.extend(cmd_args)
    # check_output returns bytes; callers therefore compare against byte
    # strings (see the b'.py' endswith in _lint).
    return set(subprocess.check_output(cmd).splitlines())
def print_success_message(message):
    """Print a message indicating success in green color to STDOUT.

    Falls back to an uncolored message when colorama is unavailable.

    :param message: the message to print
    :type message: :class:`str`
    """
    try:
        import colorama
    except ImportError:
        print(message)
    else:
        print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
def print_failure_message(message):
    """Print a message indicating failure in red color to STDERR.

    Falls back to an uncolored message when colorama is unavailable.

    :param message: the message to print
    :type message: :class:`str`
    """
    try:
        import colorama
    except ImportError:
        print(message, file=sys.stderr)
    else:
        print(colorama.Fore.RED + message + colorama.Fore.RESET,
              file=sys.stderr)
def read(filename):
    """Return the contents of a file.

    The path is resolved relative to this module's directory.

    :param filename: file path
    :type filename: :class:`str`
    :return: the file's content
    :rtype: :class:`str`
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path) as handle:
        return handle.read()
def _lint():
    """Run flake8 over the project's Python files and return its exit code."""
    # Flake8 has no stable in-process API, so shell out to it instead.
    # Python 3 compat: get_project_files() yields byte strings, so the
    # suffix comparison must also use a byte string.
    python_sources = [name for name in get_project_files()
                      if name.endswith(b'.py')]
    exit_code = subprocess.call(
        ['flake8', '--max-complexity=10'] + python_sources)
    if exit_code == 0:
        print_success_message('No style errors')
    return exit_code
def _test():
    """Run the unit tests (and doctests of the modules in TESTS_DIRECTORY).

    :return: exit code
    """
    # pytest must be imported lazily, inside this function; see
    # <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
    import pytest
    exit_code = pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
    return exit_code
def _test_all():
    """Run lint followed by the test suite.

    :return: exit code (sum of the lint and test exit codes; zero means
        both stages succeeded)
    """
    return _lint() + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
    """Setuptools ``test`` command that runs lint plus the full test suite."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Placeholder values; distutils and setuptools merely require that
        # these attributes exist.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Propagate the combined lint+test exit code to the shell.
        raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# argparse ships with the standard library on Python 2.7 and 3.3+; on older
# interpreters pull in the standalone backport package instead.
_needs_argparse_backport = (sys.version_info < (2, 7)
                            or (3, 0) <= sys.version_info < (3, 3))
if _needs_argparse_backport:
    python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = {
    'name': metadata.package,
    'version': metadata.version,
    'author': metadata.authors[0],
    'author_email': metadata.emails[0],
    'maintainer': metadata.authors[0],
    'maintainer_email': metadata.emails[0],
    'url': metadata.url,
    'description': metadata.description,
    'long_description': read('README.rst'),
    # Find a list of classifiers here:
    # <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
    'classifiers': [
        'Development Status :: 1 - Planning',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Software Distribution',
    ],
    'packages': find_packages(exclude=(TESTS_DIRECTORY,)),
    'install_requires': [
        # your module dependencies
    ] + python_version_specific_requires,
    # Allow tests to be run with `python setup.py test'.
    'tests_require': [
        'pytest==2.5.1',
        'mock==1.0.1',
        'flake8==2.1.0',
    ],
    'cmdclass': {'test': TestAllCommand},
    'zip_safe': False,  # don't use eggs
    'entry_points': {
        'console_scripts': [
            'simple_web_generator_cli = simple_web_generator.main:entry_point'
        ],
        # if you have a gui, use this
        # 'gui_scripts': [
        #     'simple_web_generator_gui = simple_web_generator.gui:entry_point'
        # ]
    },
}
def main():
    """Invoke setuptools with the assembled configuration dictionary."""
    setup(**setup_dict)


if __name__ == '__main__':
    main()
| mit |
arborh/tensorflow | tensorflow/lite/experimental/micro/tools/make/merge_arduino_zips.py | 24 | 1646 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resolves non-system C/C++ includes to their full paths to help Arduino."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import tempfile
import zipfile
def main(unparsed_args):
    """Merge multiple Arduino zipfiles into a single result.

    :param unparsed_args: positional arguments; the first entry is the
        output zip path, the remaining entries are input zip paths.
    """
    output_zip_path = unparsed_args[0]
    input_zip_paths = unparsed_args[1:]
    working_dir = tempfile.mkdtemp()
    try:
        # Extract every input archive into one shared tree; later archives
        # overwrite colliding paths.
        for input_zip_path in input_zip_paths:
            with zipfile.ZipFile(input_zip_path, 'r') as input_zip:
                input_zip.extractall(path=working_dir)
        # make_archive appends '.zip' itself, so strip it from the target.
        # NOTE: replace() removes every '.zip' substring in the path, not
        # just a trailing suffix — behavior preserved from the original.
        output_path_without_zip = output_zip_path.replace('.zip', '')
        shutil.make_archive(output_path_without_zip, 'zip', working_dir)
    finally:
        # Bug fix: the temporary directory was previously never removed,
        # leaking one extracted tree per invocation.
        shutil.rmtree(working_dir, ignore_errors=True)
def parse_args():
    """Split known flags from the raw command line and dispatch to main()."""
    arg_parser = argparse.ArgumentParser()
    _, leftover_args = arg_parser.parse_known_args()
    main(leftover_args)


if __name__ == '__main__':
    parse_args()
| apache-2.0 |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/GenMetaFile/GenInfFile.py | 1 | 44961 | ## @file GenInfFile.py
#
# This file contained the logical of transfer package object to INF files.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
GenInf
'''
import os
import stat
import codecs
import md5
from Core.FileHook import __FileHookOpen__
from Library.String import GetSplitValueList
from Library.Parsing import GenSection
from Library.Parsing import GetWorkspacePackage
from Library.Parsing import ConvertArchForInstall
from Library.Misc import SaveFileOnChange
from Library.Misc import IsAllModuleList
from Library.Misc import Sdict
from Library.Misc import ConvertPath
from Library.Misc import ConvertSpec
from Library.Misc import GetRelativePath
from Library.Misc import GetLocalValue
from Library.CommentGenerating import GenHeaderCommentSection
from Library.CommentGenerating import GenGenericCommentF
from Library.CommentGenerating import _GetHelpStr
from Library import GlobalData
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import DataType as DT
from GenMetaFile import GenMetaFileMisc
from Library.UniClassObject import FormatUniEntry
## Transfer Module Object to Inf files
#
# Transfer all contents of a standard Module Object to an Inf file
# @param ModuleObject: A Module Object
#
def ModuleToInf(ModuleObject, PackageObject=None, DistHeader=None):
    """Write ModuleObject out as an INF file at the module's own full path.

    @param ModuleObject: the module to serialize
    @param PackageObject: optional owning package, used as the first
                          fallback for abstract/description/copyright/license
    @param DistHeader: optional distribution header, the final fallback for
                       the same header fields
    @return: path of the INF file written
    """
    if not GlobalData.gWSPKG_LIST:
        GlobalData.gWSPKG_LIST = GetWorkspacePackage()
    #
    # Init global information for the file
    #
    ContainerFile = ModuleObject.GetFullPath()
    Content = ''
    #
    # Generate file header, If any Abstract, Description, Copyright or License XML elements are missing,
    # should 1) use the Abstract, Description, Copyright or License from the PackageSurfaceArea.Header elements
    # that the module belongs to, or 2) if this is a stand-alone module that is not included in a PackageSurfaceArea,
    # use the abstract, description, copyright or license from the DistributionPackage.Header elements.
    #
    ModuleAbstract = GetLocalValue(ModuleObject.GetAbstract())
    if not ModuleAbstract and PackageObject:
        ModuleAbstract = GetLocalValue(PackageObject.GetAbstract())
    if not ModuleAbstract and DistHeader:
        ModuleAbstract = GetLocalValue(DistHeader.GetAbstract())
    ModuleDescription = GetLocalValue(ModuleObject.GetDescription())
    if not ModuleDescription and PackageObject:
        ModuleDescription = GetLocalValue(PackageObject.GetDescription())
    if not ModuleDescription and DistHeader:
        ModuleDescription = GetLocalValue(DistHeader.GetDescription())
    # The loops below keep only the LAST (language, value) pair from each
    # source; module wins over package, package over distribution header.
    ModuleCopyright = ''
    for (Lang, Copyright) in ModuleObject.GetCopyright():
        if Lang:
            pass
        ModuleCopyright = Copyright
    if not ModuleCopyright and PackageObject:
        for (Lang, Copyright) in PackageObject.GetCopyright():
            if Lang:
                pass
            ModuleCopyright = Copyright
    if not ModuleCopyright and DistHeader:
        for (Lang, Copyright) in DistHeader.GetCopyright():
            if Lang:
                pass
            ModuleCopyright = Copyright
    ModuleLicense = ''
    for (Lang, License) in ModuleObject.GetLicense():
        if Lang:
            pass
        ModuleLicense = License
    if not ModuleLicense and PackageObject:
        for (Lang, License) in PackageObject.GetLicense():
            if Lang:
                pass
            ModuleLicense = License
    if not ModuleLicense and DistHeader:
        for (Lang, License) in DistHeader.GetLicense():
            if Lang:
                pass
            ModuleLicense = License
    #
    # Generate header comment section of INF file
    #
    Content += GenHeaderCommentSection(ModuleAbstract,
                                       ModuleDescription,
                                       ModuleCopyright,
                                       ModuleLicense).replace('\r\n', '\n')
    #
    # Generate Binary Header
    #
    for UserExtension in ModuleObject.GetUserExtensionList():
        if UserExtension.GetUserID() == DT.TAB_BINARY_HEADER_USERID \
        and UserExtension.GetIdentifier() == DT.TAB_BINARY_HEADER_IDENTIFIER:
            ModuleBinaryAbstract = GetLocalValue(UserExtension.GetBinaryAbstract())
            ModuleBinaryDescription = GetLocalValue(UserExtension.GetBinaryDescription())
            ModuleBinaryCopyright = ''
            ModuleBinaryLicense = ''
            for (Lang, Copyright) in UserExtension.GetBinaryCopyright():
                ModuleBinaryCopyright = Copyright
            for (Lang, License) in UserExtension.GetBinaryLicense():
                ModuleBinaryLicense = License
            # Emit the binary header only when all four fields are present.
            if ModuleBinaryAbstract and ModuleBinaryDescription and \
               ModuleBinaryCopyright and ModuleBinaryLicense:
                Content += GenHeaderCommentSection(ModuleBinaryAbstract,
                                                   ModuleBinaryDescription,
                                                   ModuleBinaryCopyright,
                                                   ModuleBinaryLicense,
                                                   True)
    #
    # Generate MODULE_UNI_FILE for module
    #
    FileHeader = GenHeaderCommentSection(ModuleAbstract, ModuleDescription, ModuleCopyright, ModuleLicense, False, \
                                         DT.TAB_COMMENT_EDK1_SPLIT)
    GenModuleUNIEncodeFile(ModuleObject, FileHeader)
    #
    # Judge whether the INF file is an AsBuild INF.
    #
    if ModuleObject.BinaryModule:
        GlobalData.gIS_BINARY_INF = True
    else:
        GlobalData.gIS_BINARY_INF = False
    #
    # for each section, maintain a dict, sorted arch will be its key,
    # statement list will be its data
    # { 'Arch1 Arch2 Arch3': [statement1, statement2],
    #   'Arch1' : [statement1, statement3]
    # }
    #
    # Gen section contents
    #
    Content += GenDefines(ModuleObject)
    Content += GenBuildOptions(ModuleObject)
    Content += GenLibraryClasses(ModuleObject)
    Content += GenPackages(ModuleObject)
    Content += GenPcdSections(ModuleObject)
    Content += GenSources(ModuleObject)
    Content += GenProtocolPPiSections(ModuleObject.GetProtocolList(), True)
    Content += GenProtocolPPiSections(ModuleObject.GetPpiList(), False)
    Content += GenGuidSections(ModuleObject.GetGuidList())
    Content += GenBinaries(ModuleObject)
    Content += GenDepex(ModuleObject)
    Content += GenUserExtensions(ModuleObject)
    if ModuleObject.GetEventList() or ModuleObject.GetBootModeList() or ModuleObject.GetHobList():
        Content += '\n'
    #
    # generate [Event], [BootMode], [Hob] section
    #
    Content += GenSpecialSections(ModuleObject.GetEventList(), 'Event')
    Content += GenSpecialSections(ModuleObject.GetBootModeList(), 'BootMode')
    Content += GenSpecialSections(ModuleObject.GetHobList(), 'Hob')
    SaveFileOnChange(ContainerFile, Content, False)
    # NOTE(review): DistHeader defaults to None but is dereferenced here
    # unconditionally — verify that all callers always pass it.
    if DistHeader.ReadOnly:
        os.chmod(ContainerFile, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
    else:
        os.chmod(ContainerFile, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH|stat.S_IWUSR|stat.S_IWGRP|stat.S_IWOTH)
    return ContainerFile
## GenModuleUNIEncodeFile
# GenModuleUNIEncodeFile, default is a UCS-2LE encode file
#
# Writes <BaseName>.uni next to the module's INF when any header field uses
# a language code other than 'en-x'; otherwise no UNI file is needed.
#
# @param ModuleObject: the module whose localized strings are emitted
# @param UniFileHeader: comment header text placed at the top of the file
# @param Encoding: codec used for the output file (UTF-16LE by default)
# @return: path of the UNI file, or None when no file is required
#
def GenModuleUNIEncodeFile(ModuleObject, UniFileHeader='', Encoding=DT.TAB_ENCODING_UTF16LE):
    GenUNIFlag = False
    OnlyLANGUAGE_EN_X = True
    BinaryAbstract = []
    BinaryDescription = []
    #
    # If more than one language code is used for any element that would be present in the MODULE_UNI_FILE,
    # then the MODULE_UNI_FILE must be created.
    #
    for (Key, Value) in ModuleObject.GetAbstract() + ModuleObject.GetDescription():
        if Key == DT.TAB_LANGUAGE_EN_X:
            GenUNIFlag = True
        else:
            OnlyLANGUAGE_EN_X = False
    for UserExtension in ModuleObject.GetUserExtensionList():
        if UserExtension.GetUserID() == DT.TAB_BINARY_HEADER_USERID \
        and UserExtension.GetIdentifier() == DT.TAB_BINARY_HEADER_IDENTIFIER:
            for (Key, Value) in UserExtension.GetBinaryAbstract():
                if Key == DT.TAB_LANGUAGE_EN_X:
                    GenUNIFlag = True
                else:
                    OnlyLANGUAGE_EN_X = False
                BinaryAbstract.append((Key, Value))
            for (Key, Value) in UserExtension.GetBinaryDescription():
                if Key == DT.TAB_LANGUAGE_EN_X:
                    GenUNIFlag = True
                else:
                    OnlyLANGUAGE_EN_X = False
                BinaryDescription.append((Key, Value))
    if not GenUNIFlag:
        return
    elif OnlyLANGUAGE_EN_X:
        return
    else:
        ModuleObject.UNIFlag = True
    ContainerFile = os.path.normpath(os.path.join(os.path.dirname(ModuleObject.GetFullPath()),
                                                  (ModuleObject.GetBaseName() + '.uni')))
    if not os.path.exists(os.path.dirname(ModuleObject.GetFullPath())):
        os.makedirs(os.path.dirname(ModuleObject.GetFullPath()))
    Content = UniFileHeader + '\r\n'
    Content += '\r\n'
    Content += FormatUniEntry('#string ' + DT.TAB_INF_ABSTRACT, ModuleObject.GetAbstract(), ContainerFile) + '\r\n'
    Content += FormatUniEntry('#string ' + DT.TAB_INF_DESCRIPTION, ModuleObject.GetDescription(), ContainerFile) \
               + '\r\n'
    BinaryAbstractString = FormatUniEntry('#string ' + DT.TAB_INF_BINARY_ABSTRACT, BinaryAbstract, ContainerFile)
    if BinaryAbstractString:
        Content += BinaryAbstractString + '\r\n'
    BinaryDescriptionString = FormatUniEntry('#string ' + DT.TAB_INF_BINARY_DESCRIPTION, BinaryDescription, \
                                             ContainerFile)
    if BinaryDescriptionString:
        Content += BinaryDescriptionString + '\r\n'
    # Only write the file if it does not exist yet; an existing file is
    # left untouched and only checksummed below.
    if not os.path.exists(ContainerFile):
        File = codecs.open(ContainerFile, 'wb', Encoding)
        File.write(u'\uFEFF' + Content)  # UTF-16 BOM first
        File.stream.close()
    # NOTE(review): the md5 module is Python 2 only (hashlib replaced it).
    Md5Sigature = md5.new(__FileHookOpen__(str(ContainerFile), 'rb').read())
    Md5Sum = Md5Sigature.hexdigest()
    if (ContainerFile, Md5Sum) not in ModuleObject.FileList:
        ModuleObject.FileList.append((ContainerFile, Md5Sum))
    return ContainerFile
def GenDefines(ModuleObject):
    """Generate the [Defines] section content for ModuleObject.

    Collects user-extension define statements plus the mandatory special
    statements (INF_VERSION, BASE_NAME, FILE_GUID, ...) and renders them
    through GenSection.

    @param ModuleObject: the module being serialized
    @return: the [Defines] section text
    """
    #
    # generate [Defines] section
    #
    LeftOffset = 31
    Content = ''
    NewSectionDict = {}
    for UserExtension in ModuleObject.GetUserExtensionList():
        DefinesDict = UserExtension.GetDefinesDict()
        if not DefinesDict:
            continue
        for Statement in DefinesDict:
            # Bug fix: the original compared the split LIST itself to 1
            # (``Statement.split(...) > 1``), which is always True on
            # Python 2 (and a TypeError on Python 3), so statements
            # without '=' crashed with IndexError below. Compare the
            # number of split parts instead.
            if len(Statement.split(DT.TAB_EQUAL_SPLIT)) > 1:
                Statement = (u'%s ' % Statement.split(DT.TAB_EQUAL_SPLIT, 1)[0]).ljust(LeftOffset) \
                            + u'= %s' % Statement.split(DT.TAB_EQUAL_SPLIT, 1)[1].lstrip()
            SortedArch = DT.TAB_ARCH_COMMON
            if Statement.strip().startswith(DT.TAB_INF_DEFINES_CUSTOM_MAKEFILE):
                pos = Statement.find(DT.TAB_VALUE_SPLIT)
                if pos == -1:
                    pos = Statement.find(DT.TAB_EQUAL_SPLIT)
                Makefile = ConvertPath(Statement[pos + 1:].strip())
                Statement = Statement[:pos + 1] + ' ' + Makefile
            if SortedArch in NewSectionDict:
                NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
            else:
                NewSectionDict[SortedArch] = [Statement]
    SpecialStatementList = []
    # TAB_INF_DEFINES_INF_VERSION
    Statement = (u'%s ' % DT.TAB_INF_DEFINES_INF_VERSION).ljust(LeftOffset) + u'= %s' % '0x00010017'
    SpecialStatementList.append(Statement)
    # BaseName: INF BASE_NAME may not begin with '.' or '-'.
    BaseName = ModuleObject.GetBaseName()
    if BaseName.startswith('.') or BaseName.startswith('-'):
        BaseName = '_' + BaseName
    Statement = (u'%s ' % DT.TAB_INF_DEFINES_BASE_NAME).ljust(LeftOffset) + u'= %s' % BaseName
    SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_FILE_GUID
    Statement = (u'%s ' % DT.TAB_INF_DEFINES_FILE_GUID).ljust(LeftOffset) + u'= %s' % ModuleObject.GetGuid()
    SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_VERSION_STRING
    Statement = (u'%s ' % DT.TAB_INF_DEFINES_VERSION_STRING).ljust(LeftOffset) + u'= %s' % ModuleObject.GetVersion()
    SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_MODULE_UNI_FILE (only when a UNI file was generated)
    if ModuleObject.UNIFlag:
        Statement = (u'%s ' % DT.TAB_INF_DEFINES_MODULE_UNI_FILE).ljust(LeftOffset) + \
                    u'= %s' % ModuleObject.GetBaseName() + '.uni'
        SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_MODULE_TYPE
    if ModuleObject.GetModuleType():
        Statement = (u'%s ' % DT.TAB_INF_DEFINES_MODULE_TYPE).ljust(LeftOffset) + u'= %s' % ModuleObject.GetModuleType()
        SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_PCD_IS_DRIVER
    if ModuleObject.GetPcdIsDriver():
        Statement = (u'%s ' % DT.TAB_INF_DEFINES_PCD_IS_DRIVER).ljust(LeftOffset) + \
                    u'= %s' % ModuleObject.GetPcdIsDriver()
        SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION
    if ModuleObject.GetUefiSpecificationVersion():
        Statement = (u'%s ' % DT.TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION).ljust(LeftOffset) + \
                    u'= %s' % ModuleObject.GetUefiSpecificationVersion()
        SpecialStatementList.append(Statement)
    # TAB_INF_DEFINES_PI_SPECIFICATION_VERSION
    if ModuleObject.GetPiSpecificationVersion():
        Statement = (u'%s ' % DT.TAB_INF_DEFINES_PI_SPECIFICATION_VERSION).ljust(LeftOffset) + \
                    u'= %s' % ModuleObject.GetPiSpecificationVersion()
        SpecialStatementList.append(Statement)
    # LibraryClass: only PRODUCES / SOMETIMES_PRODUCES classes belong in
    # [Defines]; consumed ones are listed by GenLibraryClasses.
    for LibraryClass in ModuleObject.GetLibraryClassList():
        if LibraryClass.GetUsage() == DT.USAGE_ITEM_PRODUCES or \
           LibraryClass.GetUsage() == DT.USAGE_ITEM_SOMETIMES_PRODUCES:
            Statement = (u'%s ' % DT.TAB_INF_DEFINES_LIBRARY_CLASS).ljust(LeftOffset) + \
                        u'= %s' % LibraryClass.GetLibraryClass()
            if LibraryClass.GetSupModuleList():
                Statement += '|' + DT.TAB_SPACE_SPLIT.join(l for l in LibraryClass.GetSupModuleList())
            SpecialStatementList.append(Statement)
    # Spec Item
    for SpecItem in ModuleObject.GetSpecList():
        Spec, Version = SpecItem
        Spec = ConvertSpec(Spec)
        Statement = '%s %s = %s' % (DT.TAB_INF_DEFINES_SPEC, Spec, Version)
        SpecialStatementList.append(Statement)
    # Extern
    ExternList = []
    for Extern in ModuleObject.GetExternList():
        ArchList = Extern.GetSupArchList()
        EntryPoint = Extern.GetEntryPoint()
        UnloadImage = Extern.GetUnloadImage()
        Constructor = Extern.GetConstructor()
        Destructor = Extern.GetDestructor()
        HelpStringList = Extern.GetHelpTextList()
        FFE = Extern.GetFeatureFlag()
        ExternList.append([ArchList, EntryPoint, UnloadImage, Constructor, Destructor, FFE, HelpStringList])
    #
    # Add VALID_ARCHITECTURES information
    #
    ValidArchStatement = None
    if ModuleObject.SupArchList:
        ValidArchStatement = '\n' + '# ' + '\n'
        ValidArchStatement += '# The following information is for reference only and not required by the build tools.\n'
        ValidArchStatement += '# ' + '\n'
        ValidArchStatement += '# VALID_ARCHITECTURES = %s' % (' '.join(ModuleObject.SupArchList)) + '\n'
        ValidArchStatement += '# '
    if DT.TAB_ARCH_COMMON not in NewSectionDict:
        NewSectionDict[DT.TAB_ARCH_COMMON] = []
    NewSectionDict[DT.TAB_ARCH_COMMON] = NewSectionDict[DT.TAB_ARCH_COMMON] + SpecialStatementList
    GenMetaFileMisc.AddExternToDefineSec(NewSectionDict, DT.TAB_ARCH_COMMON, ExternList)
    if ValidArchStatement is not None:
        NewSectionDict[DT.TAB_ARCH_COMMON] = NewSectionDict[DT.TAB_ARCH_COMMON] + [ValidArchStatement]
    Content += GenSection('Defines', NewSectionDict)
    return Content
def GenLibraryClasses(ModuleObject):
    """Generate the [LibraryClasses] section content.

    For source INFs, lists the consumed library classes; for as-built
    (binary) INFs, lists the library instances recorded in the binaries.

    @param ModuleObject: the module being serialized
    @return: the [LibraryClasses] section text
    """
    #
    # generate [LibraryClasses] section
    #
    Content = ''
    NewSectionDict = {}
    if not GlobalData.gIS_BINARY_INF:
        for LibraryClass in ModuleObject.GetLibraryClassList():
            # Produced classes are declared in [Defines], not here.
            if LibraryClass.GetUsage() == DT.USAGE_ITEM_PRODUCES:
                continue
            #
            # Generate generic comment
            #
            HelpTextList = LibraryClass.GetHelpTextList()
            HelpStr = _GetHelpStr(HelpTextList)
            CommentStr = GenGenericCommentF(HelpStr)
            Statement = CommentStr
            Name = LibraryClass.GetLibraryClass()
            FFE = LibraryClass.GetFeatureFlag()
            Statement += Name
            if FFE:
                Statement += '|' + FFE
            ModuleList = LibraryClass.GetSupModuleList()
            ArchList = LibraryClass.GetSupArchList()
            for Index in xrange(0, len(ArchList)):
                ArchList[Index] = ConvertArchForInstall(ArchList[Index])
            ArchList.sort()
            SortedArch = ' '.join(ArchList)
            KeyList = []
            if not ModuleList or IsAllModuleList(ModuleList):
                KeyList = [SortedArch]
            else:
                # Restrict the section to specific module types, e.g.
                # 'IA32.DXE_DRIVER'.
                ModuleString = DT.TAB_VALUE_SPLIT.join(l for l in ModuleList)
                if not ArchList:
                    SortedArch = DT.TAB_ARCH_COMMON
                    KeyList = [SortedArch + '.' + ModuleString]
                else:
                    KeyList = [Arch + '.' + ModuleString for Arch in ArchList]
            for Key in KeyList:
                if Key in NewSectionDict:
                    NewSectionDict[Key] = NewSectionDict[Key] + [Statement]
                else:
                    NewSectionDict[Key] = [Statement]
        Content += GenSection('LibraryClasses', NewSectionDict)
    else:
        LibraryClassDict = {}
        for BinaryFile in ModuleObject.GetBinaryFileList():
            if not BinaryFile.AsBuiltList:
                continue
            for LibraryItem in BinaryFile.AsBuiltList[0].LibraryInstancesList:
                Statement = '# Guid: ' + LibraryItem.Guid + ' Version: ' + LibraryItem.Version
                if len(BinaryFile.SupArchList) == 0:
                    # NOTE(review): when 'COMMON' exists but Statement is a
                    # duplicate, this else-branch RESETS the list, dropping
                    # earlier entries — looks unintended; verify upstream.
                    if LibraryClassDict.has_key('COMMON') and Statement not in LibraryClassDict['COMMON']:
                        LibraryClassDict['COMMON'].append(Statement)
                    else:
                        LibraryClassDict['COMMON'] = ['## @LIB_INSTANCES']
                        LibraryClassDict['COMMON'].append(Statement)
                else:
                    for Arch in BinaryFile.SupArchList:
                        if LibraryClassDict.has_key(Arch):
                            if Statement not in LibraryClassDict[Arch]:
                                LibraryClassDict[Arch].append(Statement)
                            else:
                                continue
                        else:
                            LibraryClassDict[Arch] = ['## @LIB_INSTANCES']
                            LibraryClassDict[Arch].append(Statement)
        Content += GenSection('LibraryClasses', LibraryClassDict)
    return Content
def GenPackages(ModuleObject):
    """Generate the [Packages] section content.

    Resolves each package dependency's GUID (and optional version) to a
    workspace-relative path via GlobalData.gWSPKG_LIST.

    @param ModuleObject: the module being serialized
    @return: the [Packages] section text
    """
    Content = ''
    #
    # generate [Packages] section
    #
    NewSectionDict = Sdict()
    WorkspaceDir = GlobalData.gWORKSPACE
    for PackageDependency in ModuleObject.GetPackageDependencyList():
        #
        # Generate generic comment
        #
        CommentStr = ''
        HelpText = PackageDependency.GetHelpText()
        if HelpText:
            HelpStr = HelpText.GetString()
            CommentStr = GenGenericCommentF(HelpStr)
        Statement = CommentStr
        Guid = PackageDependency.GetGuid()
        Version = PackageDependency.GetVersion()
        FFE = PackageDependency.GetFeatureFlag()
        Path = ''
        #
        # find package path/name
        #
        for PkgInfo in GlobalData.gWSPKG_LIST:
            if Guid == PkgInfo[1]:
                # An empty Version matches any installed package version.
                if (not Version) or (Version == PkgInfo[2]):
                    Path = PkgInfo[3]
                    break
        #
        # get relative path
        #
        RelaPath = GetRelativePath(Path, WorkspaceDir)
        # INF paths always use forward slashes.
        Statement += RelaPath.replace('\\', '/')
        if FFE:
            Statement += '|' + FFE
        ArchList = PackageDependency.GetSupArchList()
        ArchList.sort()
        SortedArch = ' '.join(ArchList)
        if SortedArch in NewSectionDict:
            NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
        else:
            NewSectionDict[SortedArch] = [Statement]
    Content += GenSection('Packages', NewSectionDict)
    return Content
def GenSources(ModuleObject):
    """Generate the [Sources] section content.

    Groups source statements by their sorted supported-arch key and renders
    them through GenSection.

    @param ModuleObject: the module being serialized
    @return: the [Sources] section text
    """
    SectionDict = {}
    for SourceItem in ModuleObject.GetSourceFileList():
        SupArchList = SourceItem.GetSupArchList()
        SupArchList.sort()
        ArchKey = ' '.join(SupArchList)
        Entry = GenSourceStatement(ConvertPath(SourceItem.GetSourceFile()),
                                   SourceItem.GetFamily(),
                                   SourceItem.GetFeatureFlag())
        SectionDict.setdefault(ArchKey, []).append(Entry)
    return GenSection('Sources', SectionDict)
def GenDepex(ModuleObject):
    """Generate the [Depex] section content.

    Merges PEI/DXE/SMM dependency expressions, keyed by arch (and by module
    type when one is specified).

    @param ModuleObject: the module being serialized
    @return: the [Depex] section text
    """
    #
    # generate [Depex] section
    #
    NewSectionDict = Sdict()
    Content = ''
    for Depex in ModuleObject.GetPeiDepex() + ModuleObject.GetDxeDepex() + ModuleObject.GetSmmDepex():
        HelpTextList = Depex.GetHelpTextList()
        HelpStr = _GetHelpStr(HelpTextList)
        CommentStr = GenGenericCommentF(HelpStr)
        SupArchList = Depex.GetSupArchList()
        SupModList = Depex.GetModuleType()
        Expression = Depex.GetDepex()
        Statement = CommentStr + Expression
        SupArchList.sort()
        KeyList = []
        # No arch restriction means the lowercase 'common' key is used.
        if not SupArchList:
            SupArchList.append(DT.TAB_ARCH_COMMON.lower())
        if not SupModList:
            KeyList = SupArchList
        else:
            for ModuleType in SupModList:
                for Arch in SupArchList:
                    KeyList.append(ConvertArchForInstall(Arch) + '.' + ModuleType)
        for Key in KeyList:
            if Key in NewSectionDict:
                NewSectionDict[Key] = NewSectionDict[Key] + [Statement]
            else:
                NewSectionDict[Key] = [Statement]
    Content += GenSection('Depex', NewSectionDict, False)
    return Content
## GenUserExtensions
#
# Generate the [UserExtensions] sections, skipping the binary-header and
# Depex user extensions, which are emitted elsewhere.
#
# @param ModuleObject: the module being serialized
# @retval Content: the [UserExtensions] section text
#
def GenUserExtensions(ModuleObject):
    NewSectionDict = {}
    for UserExtension in ModuleObject.GetUserExtensionList():
        # The binary header user extension is rendered in the file header.
        if UserExtension.GetUserID() == DT.TAB_BINARY_HEADER_USERID and \
           UserExtension.GetIdentifier() == DT.TAB_BINARY_HEADER_IDENTIFIER:
            continue
        # Depex user extensions are rendered by GenDepex.
        if UserExtension.GetIdentifier() == 'Depex':
            continue
        Statement = UserExtension.GetStatement()
        if not Statement:
            continue
        ArchList = UserExtension.GetSupArchList()
        for Index in xrange(0, len(ArchList)):
            ArchList[Index] = ConvertArchForInstall(ArchList[Index])
        ArchList.sort()
        KeyList = []
        CommonPreFix = ''
        if UserExtension.GetUserID():
            CommonPreFix = UserExtension.GetUserID()
            # Quote user IDs that contain '.' so the section header parses.
            if CommonPreFix.find('.') > -1:
                CommonPreFix = '"' + CommonPreFix + '"'
            if UserExtension.GetIdentifier():
                CommonPreFix += '.' + '"' + UserExtension.GetIdentifier() + '"'
            if ArchList:
                KeyList = [CommonPreFix + '.' + Arch for Arch in ArchList]
            else:
                KeyList = [CommonPreFix]
        for Key in KeyList:
            if Key in NewSectionDict:
                NewSectionDict[Key] = NewSectionDict[Key] + [Statement]
            else:
                NewSectionDict[Key] = [Statement]
    Content = GenSection('UserExtensions', NewSectionDict, False)
    return Content
# GenSourceStatement
#
# Build one [Sources] entry of the form
# SourceFile|Family|TagName|ToolCode|FeatureFlag, dropping empty trailing
# fields, optionally preceded by a generated help comment.
#
# @param SourceFile: string of source file path/name
# @param Family: string of source file family field
# @param FeatureFlag: string of source file FeatureFlag field
# @param TagName: string of source file TagName field
# @param ToolCode: string of source file ToolCode field
# @param HelpStr: string of source file HelpStr field
#
# @retval Statement: The generated statement for source
#
def GenSourceStatement(SourceFile, Family, FeatureFlag, TagName=None,
                       ToolCode=None, HelpStr=None):
    Parts = []
    if HelpStr:
        Parts.append(GenGenericCommentF(HelpStr))
    TagName = '' if TagName is None else TagName
    ToolCode = '' if ToolCode is None else ToolCode
    #
    # Keep every field up to (and including) the last non-empty one so
    # empty trailing '|' separators are not emitted.
    #
    Fields = [Family, TagName, ToolCode, FeatureFlag]
    if FeatureFlag:
        Keep = 4
    elif ToolCode:
        Keep = 3
    elif TagName:
        Keep = 2
    elif Family:
        Keep = 1
    else:
        Keep = 0
    Parts.append('|'.join([SourceFile] + Fields[:Keep]))
    return ''.join(Parts)
# GenBinaryStatement
#
# Build one [Binaries] entry of the form
# FileType|FileName|Target|Family|TagName|FeatureFlag (UI/VER file types
# use the shorter FileType|FileName|Target|FeatureFlag form), optionally
# preceded by a generated comment. Empty trailing fields are omitted.
#
# @param Key: (FileName, FileType, FFE, SortedArch)
# @param Value: (Target, Family, TagName, Comment)
# @param SubTypeGuidValue: GUID string inserted for SUBTYPE_GUID entries
#
# @retval Statement: the generated binary statement
#
def GenBinaryStatement(Key, Value, SubTypeGuidValue=None):
    (FileName, FileType, FFE, SortedArch) = Key
    # SortedArch is unused here; the no-op below silences lint tools.
    if SortedArch:
        pass
    if Value:
        (Target, Family, TagName, Comment) = Value
    else:
        Target = ''
        Family = ''
        TagName = ''
        Comment = ''
    if Comment:
        Statement = GenGenericCommentF(Comment)
    else:
        Statement = ''
    if FileType == 'SUBTYPE_GUID' and SubTypeGuidValue:
        Statement += FileType + '|' + SubTypeGuidValue + '|' + FileName
    else:
        Statement += FileType + '|' + FileName
    if FileType in DT.BINARY_FILE_TYPE_UI_LIST + DT.BINARY_FILE_TYPE_VER_LIST:
        # UI/VER binaries carry no Family/TagName fields.
        if FFE:
            Statement += '|' + Target + '|' + FFE
        elif Target:
            Statement += '|' + Target
    else:
        # The elif cascade keeps all fields up to the last non-empty one.
        if FFE:
            Statement += '|' + Target + '|' + Family + '|' + TagName + '|' + FFE
        elif TagName:
            Statement += '|' + Target + '|' + Family + '|' + TagName
        elif Family:
            Statement += '|' + Target + '|' + Family
        elif Target:
            Statement += '|' + Target
    return Statement
## GenGuidSections
#
# Generate the [Guids] section, merging duplicate (statement, arch) items
# by concatenating their comments.
#
# @param GuidObjList: List of GuidObject
# @retVal Content: The generated section contents
#
def GenGuidSections(GuidObjList):
    #
    # generate [Guids] section
    #
    Content = ''
    GuidDict = Sdict()
    for Guid in GuidObjList:
        HelpTextList = Guid.GetHelpTextList()
        HelpStr = _GetHelpStr(HelpTextList)
        CName = Guid.GetCName()
        FFE = Guid.GetFeatureFlag()
        Statement = CName
        if FFE:
            Statement += '|' + FFE
        Usage = Guid.GetUsage()
        GuidType = Guid.GetGuidTypeList()[0]
        VariableName = Guid.GetVariableName()
        #
        # Differentiate the generic comment and usage comment as multiple generic comment need to be put at first
        #
        if Usage == DT.ITEM_UNDEFINED and GuidType == DT.ITEM_UNDEFINED:
            # generate list of generic comment
            Comment = GenGenericCommentF(HelpStr)
        else:
            # generate list of other comment
            Comment = HelpStr.replace('\n', ' ')
            Comment = Comment.strip()
            if Comment:
                Comment = ' # ' + Comment
            else:
                Comment = ''
            if Usage != DT.ITEM_UNDEFINED and GuidType == DT.ITEM_UNDEFINED:
                Comment = '## ' + Usage + Comment
            elif GuidType == 'Variable':
                # Variable GUIDs also record the variable name.
                Comment = '## ' + Usage + ' ## ' + GuidType + ':' + VariableName + Comment
            else:
                Comment = '## ' + Usage + ' ## ' + GuidType + Comment
            if Comment:
                Comment += '\n'
        #
        # merge duplicate items
        #
        ArchList = Guid.GetSupArchList()
        ArchList.sort()
        SortedArch = ' '.join(ArchList)
        if (Statement, SortedArch) in GuidDict:
            PreviousComment = GuidDict[Statement, SortedArch]
            Comment = PreviousComment + Comment
        GuidDict[Statement, SortedArch] = Comment
    NewSectionDict = GenMetaFileMisc.TransferDict(GuidDict, 'INF_GUID')
    #
    # generate the section contents
    #
    if NewSectionDict:
        Content = GenSection('Guids', NewSectionDict)
    return Content
## GenProtocolPPiSections
#
# Generate the [Protocols] or [Ppis] section, merging duplicate
# (statement, arch) items by concatenating their comments.
#
# @param ObjList: List of ProtocolObject or Ppi Object
# @param IsProtocol: True to emit [Protocols], False to emit [Ppis]
# @retVal Content: The generated section contents
#
def GenProtocolPPiSections(ObjList, IsProtocol):
    Content = ''
    Dict = Sdict()
    for Object in ObjList:
        HelpTextList = Object.GetHelpTextList()
        HelpStr = _GetHelpStr(HelpTextList)
        CName = Object.GetCName()
        FFE = Object.GetFeatureFlag()
        Statement = CName
        if FFE:
            Statement += '|' + FFE
        Usage = Object.GetUsage()
        Notify = Object.GetNotify()
        #
        # Differentiate the generic comment and usage comment as consecutive generic comment need to be put together
        #
        if Usage == DT.ITEM_UNDEFINED and Notify == '':
            # generate list of generic comment
            Comment = GenGenericCommentF(HelpStr)
        else:
            # generate list of other comment
            Comment = HelpStr.replace('\n', ' ')
            Comment = Comment.strip()
            if Comment:
                Comment = ' # ' + Comment
            else:
                Comment = ''
            if Usage == DT.ITEM_UNDEFINED and not Comment and Notify == '':
                Comment = ''
            else:
                if Notify:
                    Comment = '## ' + Usage + ' ## ' + 'NOTIFY' + Comment
                else:
                    Comment = '## ' + Usage + Comment
            if Comment:
                Comment += '\n'
        #
        # merge duplicate items
        #
        ArchList = Object.GetSupArchList()
        ArchList.sort()
        SortedArch = ' '.join(ArchList)
        if (Statement, SortedArch) in Dict:
            PreviousComment = Dict[Statement, SortedArch]
            Comment = PreviousComment + Comment
        Dict[Statement, SortedArch] = Comment
    NewSectionDict = GenMetaFileMisc.TransferDict(Dict, 'INF_PPI_PROTOCOL')
    #
    # generate the section contents
    #
    if NewSectionDict:
        if IsProtocol:
            Content = GenSection('Protocols', NewSectionDict)
        else:
            Content = GenSection('Ppis', NewSectionDict)
    return Content
## GenPcdSections
#
# Generate the PCD sections: one section per PCD item type for source INFs,
# or the as-built PatchPcd/PcdEx sections for binary INFs.
#
# @param ModuleObject: the module being serialized
# @retval Content: the generated PCD section text
#
def GenPcdSections(ModuleObject):
    Content = ''
    if not GlobalData.gIS_BINARY_INF:
        #
        # for each Pcd Itemtype, maintain a dict so the same type will be grouped
        # together
        #
        ItemTypeDict = {}
        for Pcd in ModuleObject.GetPcdList():
            HelpTextList = Pcd.GetHelpTextList()
            HelpStr = _GetHelpStr(HelpTextList)
            Statement = ''
            CName = Pcd.GetCName()
            TokenSpaceGuidCName = Pcd.GetTokenSpaceGuidCName()
            DefaultValue = Pcd.GetDefaultValue()
            ItemType = Pcd.GetItemType()
            if ItemType in ItemTypeDict:
                Dict = ItemTypeDict[ItemType]
            else:
                Dict = Sdict()
                ItemTypeDict[ItemType] = Dict
            FFE = Pcd.GetFeatureFlag()
            Statement += TokenSpaceGuidCName + '.' + CName
            if DefaultValue:
                Statement += '|' + DefaultValue
                if FFE:
                    Statement += '|' + FFE
            elif FFE:
                # No default value: an empty field keeps the '||' separator.
                Statement += '||' + FFE
            #
            # Generate comment
            #
            Usage = Pcd.GetValidUsage()
            # if FeatureFlag Pcd, then assume all Usage is CONSUMES
            if ItemType == DT.TAB_INF_FEATURE_PCD:
                Usage = DT.USAGE_ITEM_CONSUMES
            if Usage == DT.ITEM_UNDEFINED:
                # generate list of generic comment
                Comment = GenGenericCommentF(HelpStr)
            else:
                # generate list of other comment
                Comment = HelpStr.replace('\n', ' ')
                Comment = Comment.strip()
                if Comment:
                    Comment = ' # ' + Comment
                else:
                    Comment = ''
                Comment = '## ' + Usage + Comment
                if Comment:
                    Comment += '\n'
            #
            # Merge duplicate entries
            #
            ArchList = Pcd.GetSupArchList()
            ArchList.sort()
            SortedArch = ' '.join(ArchList)
            if (Statement, SortedArch) in Dict:
                PreviousComment = Dict[Statement, SortedArch]
                Comment = PreviousComment + Comment
            Dict[Statement, SortedArch] = Comment
        for ItemType in ItemTypeDict:
            # First we need to transfer the Dict to use SortedArch as key
            Dict = ItemTypeDict[ItemType]
            NewSectionDict = GenMetaFileMisc.TransferDict(Dict, 'INF_PCD')
            if NewSectionDict:
                Content += GenSection(ItemType, NewSectionDict)
    #
    # For AsBuild INF files
    #
    else:
        Content += GenAsBuiltPacthPcdSections(ModuleObject)
        Content += GenAsBuiltPcdExSections(ModuleObject)
    return Content
## GenPcdSections
#
#
def GenAsBuiltPacthPcdSections(ModuleObject):
    """Generate the as-built [PatchPcd] section for a binary INF.

    Walks every binary file's as-built data, resolves each patchable PCD's
    token-space/CName pair from the package dependencies' DEC files and groups
    the resulting statements by the binary file's architecture (falling back
    to COMMON when no arch is declared).

    @param ModuleObject: module whose binary files are inspected
    @return: the formatted [PatchPcd] section string
    """
    PatchPcdDict = {}
    for BinaryFile in ModuleObject.GetBinaryFileList():
        if not BinaryFile.AsBuiltList:
            continue
        for PatchPcd in BinaryFile.AsBuiltList[0].PatchPcdList:
            PcdValue = PatchPcd.DefaultValue
            PcdOffset = PatchPcd.Offset
            TokenSpaceGuidValue = PatchPcd.TokenSpaceGuidValue
            Token = PatchPcd.Token
            # Render the PCD's help text as '##' comment lines.
            HelpString = ''
            for HelpStringItem in PatchPcd.HelpTextList:
                for HelpLine in GetSplitValueList(HelpStringItem.String, '\n'):
                    HelpString += '## ' + HelpLine + '\n'
            # Both names must resolve from the dependency DEC files, otherwise
            # the installed package set is inconsistent.
            TokenSpaceName, PcdCName = GenMetaFileMisc.ObtainPcdName(ModuleObject.PackageDependencyList,
                                                                     TokenSpaceGuidValue,
                                                                     Token)
            if TokenSpaceName == '' or PcdCName == '':
                Logger.Error("Upt",
                             ToolError.RESOURCE_NOT_AVAILABLE,
                             ST.ERR_INSTALL_FILE_DEC_FILE_ERROR % (TokenSpaceGuidValue, Token),
                             File=ModuleObject.GetFullPath())
            Statement = HelpString + TokenSpaceName + '.' + PcdCName + ' | ' + PcdValue + ' | ' + \
                        PcdOffset + DT.TAB_SPACE_SPLIT
            #
            # Use binary file's Arch to be Pcd's Arch
            #
            ArchList = []
            FileNameObjList = BinaryFile.GetFileNameList()
            if FileNameObjList:
                ArchList = FileNameObjList[0].GetSupArchList()
            if len(ArchList) == 0:
                # No declared arch: file the statement under COMMON, using the
                # same de-duplicating code path as explicit arches.
                ArchList = [DT.TAB_ARCH_COMMON]
            for Arch in ArchList:
                # 'in' instead of dict.has_key() (removed in Python 3, and the
                # idiomatic spelling on Python 2 as well).
                if Arch in PatchPcdDict:
                    if Statement not in PatchPcdDict[Arch]:
                        PatchPcdDict[Arch].append(Statement)
                else:
                    PatchPcdDict[Arch] = [Statement]
    return GenSection(DT.TAB_INF_PATCH_PCD, PatchPcdDict)
## GenPcdSections
#
#
def GenAsBuiltPcdExSections(ModuleObject):
    """Generate the as-built [PcdEx] section for a binary INF.

    Mirrors GenAsBuiltPacthPcdSections(): resolves each PcdEx item's
    token-space/CName pair from the dependency DEC files and groups the
    statements by the binary file's architecture (COMMON when none declared).

    @param ModuleObject: module whose binary files are inspected
    @return: the formatted [PcdEx] section string
    """
    PcdExDict = {}
    for BinaryFile in ModuleObject.GetBinaryFileList():
        if not BinaryFile.AsBuiltList:
            continue
        for PcdExItem in BinaryFile.AsBuiltList[0].PcdExValueList:
            TokenSpaceGuidValue = PcdExItem.TokenSpaceGuidValue
            Token = PcdExItem.Token
            # Render the PCD's help text as '##' comment lines.
            HelpString = ''
            for HelpStringItem in PcdExItem.HelpTextList:
                for HelpLine in GetSplitValueList(HelpStringItem.String, '\n'):
                    HelpString += '## ' + HelpLine + '\n'
            TokenSpaceName, PcdCName = GenMetaFileMisc.ObtainPcdName(ModuleObject.PackageDependencyList,
                                                                     TokenSpaceGuidValue, Token)
            if TokenSpaceName == '' or PcdCName == '':
                Logger.Error("Upt",
                             ToolError.RESOURCE_NOT_AVAILABLE,
                             ST.ERR_INSTALL_FILE_DEC_FILE_ERROR % (TokenSpaceGuidValue, Token),
                             File=ModuleObject.GetFullPath())
            Statement = HelpString + TokenSpaceName + DT.TAB_SPLIT + PcdCName + DT.TAB_SPACE_SPLIT
            #
            # Use binary file's Arch to be Pcd's Arch
            #
            ArchList = []
            FileNameObjList = BinaryFile.GetFileNameList()
            if FileNameObjList:
                ArchList = FileNameObjList[0].GetSupArchList()
            if len(ArchList) == 0:
                ArchList = ['COMMON']
            for Arch in ArchList:
                # 'in' instead of dict.has_key() (removed in Python 3).
                if Arch in PcdExDict:
                    # Fix: the original COMMON branch appended without checking
                    # for duplicates, unlike the per-arch branch and unlike the
                    # PatchPcd twin above; de-duplicate consistently here.
                    if Statement not in PcdExDict[Arch]:
                        PcdExDict[Arch].append(Statement)
                else:
                    PcdExDict[Arch] = [Statement]
    return GenSection('PcdEx', PcdExDict)
## GenSpecialSections
# generate special sections for Event/BootMode/Hob
#
def GenSpecialSections(ObjectList, SectionName):
    """Generate a fully commented-out special section (Event/BootMode/Hob).

    Each object contributes one statement prefixed by a head comment built
    from its help text; statements are grouped by their sorted arch list and
    the whole rendered section is then commented out line by line.
    """
    NewSectionDict = {}
    for Obj in ObjectList:
        # Head comment derived from the object's help text.
        HelpStr = _GetHelpStr(Obj.GetHelpTextList())
        CommentStr = GenGenericCommentF(HelpStr)
        if SectionName == 'Hob':
            Type = Obj.GetHobType()
        elif SectionName == 'Event':
            Type = Obj.GetEventType()
        elif SectionName == 'BootMode':
            Type = Obj.GetSupportedBootModes()
        else:
            assert(SectionName)
        Statement = ' ' + Type + ' ## ' + Obj.GetUsage()
        if CommentStr in ['#\n', '#\n#\n']:
            CommentStr = '#\n#\n#\n'
        # Per the INF spec the head comment must open with '##\n': prepend a
        # '#' when it starts with '#\n', otherwise prepend a full '##\n'.
        if CommentStr.startswith('#\n'):
            CommentStr = '#' + CommentStr
        elif CommentStr:
            CommentStr = '##\n' + CommentStr
        if CommentStr and not CommentStr.endswith('\n#\n'):
            CommentStr = CommentStr + '#\n'
        SupArch = Obj.GetSupArchList()
        SupArch.sort()
        SortedArch = ' '.join(SupArch)
        NewSectionDict.setdefault(SortedArch, []).append(CommentStr + Statement)
    SectionContent = GenSection(SectionName, NewSectionDict).strip()
    Content = ''
    if SectionContent:
        # Comment out every line of the rendered section.
        Content = '# ' + ('\n' + '# ').join(GetSplitValueList(SectionContent, '\n'))
        Content = Content.lstrip()
    #
    # add a return to differentiate it between other possible sections
    #
    if Content:
        Content += '\n'
    return Content
## GenBuildOptions
#
#
def GenBuildOptions(ModuleObject):
Content = ''
if not ModuleObject.BinaryModule:
#
# generate [BuildOptions] section
#
NewSectionDict = {}
for UserExtension in ModuleObject.GetUserExtensionList():
BuildOptionDict = UserExtension.GetBuildOptionDict()
if not BuildOptionDict:
continue
for Arch in BuildOptionDict:
if Arch in NewSectionDict:
NewSectionDict[Arch] = NewSectionDict[Arch] + [BuildOptionDict[Arch]]
else:
NewSectionDict[Arch] = [BuildOptionDict[Arch]]
Content = GenSection('BuildOptions', NewSectionDict)
else:
BuildOptionDict = {}
for BinaryFile in ModuleObject.GetBinaryFileList():
if not BinaryFile.AsBuiltList:
continue
for BuilOptionItem in BinaryFile.AsBuiltList[0].BinaryBuildFlagList:
Statement = '#' + BuilOptionItem.AsBuiltOptionFlags
if len(BinaryFile.SupArchList) == 0:
if BuildOptionDict.has_key('COMMON'):
if Statement not in BuildOptionDict['COMMON']:
BuildOptionDict['COMMON'].append(Statement)
else:
BuildOptionDict['COMMON'] = ['## @AsBuilt']
BuildOptionDict['COMMON'].append(Statement)
else:
for Arch in BinaryFile.SupArchList:
if BuildOptionDict.has_key(Arch):
if Statement not in BuildOptionDict[Arch]:
BuildOptionDict[Arch].append(Statement)
else:
BuildOptionDict[Arch] = ['## @AsBuilt']
BuildOptionDict[Arch].append(Statement)
Content = GenSection('BuildOptions', BuildOptionDict)
return Content
## GenBinaries
#
#
def GenBinaries(ModuleObject):
NewSectionDict = {}
BinariesDict = []
for UserExtension in ModuleObject.GetUserExtensionList():
BinariesDict = UserExtension.GetBinariesDict()
if BinariesDict:
break
for BinaryFile in ModuleObject.GetBinaryFileList():
FileNameObjList = BinaryFile.GetFileNameList()
for FileNameObj in FileNameObjList:
FileName = ConvertPath(FileNameObj.GetFilename())
FileType = FileNameObj.GetFileType()
FFE = FileNameObj.GetFeatureFlag()
ArchList = FileNameObj.GetSupArchList()
ArchList.sort()
SortedArch = ' '.join(ArchList)
Key = (FileName, FileType, FFE, SortedArch)
if Key in BinariesDict:
ValueList = BinariesDict[Key]
for ValueItem in ValueList:
Statement = GenBinaryStatement(Key, ValueItem)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
#
# as we already generated statement for this DictKey here set the Valuelist to be empty
# to avoid generate duplicate entries as the DictKey may have multiple entries
#
BinariesDict[Key] = []
else:
if FileType == 'SUBTYPE_GUID' and FileNameObj.GetGuidValue():
Statement = GenBinaryStatement(Key, None, FileNameObj.GetGuidValue())
else:
Statement = GenBinaryStatement(Key, None)
if SortedArch in NewSectionDict:
NewSectionDict[SortedArch] = NewSectionDict[SortedArch] + [Statement]
else:
NewSectionDict[SortedArch] = [Statement]
Content = GenSection('Binaries', NewSectionDict)
return Content
| gpl-2.0 |
btenaglia/hpc-historias-clinicas | hpc-historias-clinicas/inter_consultas/views.py | 1 | 3625 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.contrib import messages
from braces.views import LoginRequiredMixin
from .models import InterConsultas
from ..historias.models import Historias
class InterConsultasMixin(object):
    """Shared helpers for the inter-consultation CRUD views: success message
    hook, download-link snippet, template context and redirect URL."""
    def success_msg(self):
        # Subclasses override this (they actually shadow it with a plain
        # class attribute string used as the flash message).
        return NotImplemented
    def descarga_msg(self, ic_id):
        # HTML snippet linking to the printable report of one
        # inter-consultation (user-facing text intentionally in Spanish).
        return " Click en el siguiente link para <a target='_blank' href='/reportes/inter/consultas/%s'>Descargar e Imprimir</a>" % str(ic_id)
    def get_context_data(self, **kwargs):
        """Add the clinical history record (from the URL kwarg) to the context."""
        ctx = super(InterConsultasMixin, self).get_context_data(**kwargs)
        ctx['historia'] = Historias.objects.filter(id=self.kwargs['historia']).get()
        return ctx
    def get_success_url(self):
        # Flash the per-view success message, then return to the listing.
        messages.success(self.request, self.success_msg)
        return '/inter/consultas/%s' % (self.kwargs['historia'])
class InterConsultasListView(LoginRequiredMixin, InterConsultasMixin, ListView):
    """List the inter-consultations of one clinical history.

    Supports two optional GET filters: ``fecha`` (dd/mm/yyyy) and
    ``palabra_clave`` (case-insensitive substring of the description).
    """
    def get_queryset(self):
        queryset = InterConsultas.objects.filter(historia=self.kwargs['historia'])
        # Date filter: the widget posts dd/mm/yyyy, the ORM expects ISO order.
        fecha = self.request.GET.get('fecha')
        if fecha:
            parts = fecha.split('/')
            iso_date = parts[2] + '-' + parts[1] + '-' + parts[0]
            queryset = queryset.filter(fecha=iso_date)
        # Keyword filter over the description text.
        palabra_clave = self.request.GET.get('palabra_clave')
        if palabra_clave:
            queryset = queryset.filter(descripcion__icontains=palabra_clave)
        return queryset
class InterConsultasCreateView(LoginRequiredMixin, InterConsultasMixin, CreateView):
    """Create a new inter-consultation attached to a clinical history."""
    model = InterConsultas
    fields = ['fecha', 'descripcion']
    success_msg = 'La inter consulta se agregó con éxito.'
    def get_success_url(self):
        # Append the download/print link for the record that was just created.
        # NOTE(review): latest('id') assumes the newest row is the one this
        # request created; under concurrent inserts another user's record
        # could be picked up — confirm this is acceptable.
        pk = InterConsultas.objects.latest('id').id
        if pk:
            self.success_msg += self.descarga_msg(pk)
        return super(InterConsultasCreateView, self).get_success_url()
    def post(self, request, *args, **kwargs):
        # The history id comes from the URL, not the form, so it must be set
        # on the form instance before validation/saving.
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        self.object = None
        form.instance.historia_id = self.kwargs['historia']
        if form.is_valid():
            # NOTE(review): form.save() here followed by self.form_valid()
            # (which saves again in Django's ModelFormMixin) looks like a
            # double save — verify whether the explicit save is needed.
            form.save()
            return self.form_valid(form)
        else:
            return self.form_invalid(**{'form': form})
class InterConsultasUpdateView(LoginRequiredMixin, InterConsultasMixin, UpdateView):
    """Edit an existing inter-consultation."""
    model = InterConsultas
    fields = ['fecha', 'descripcion']
    success_msg = 'La inter consulta se editó con éxito.'
    def get_success_url(self):
        # Append the download/print link for the record being edited; its
        # primary key comes straight from the URL.
        pk = self.kwargs['pk']
        if pk:
            self.success_msg += self.descarga_msg(pk)
        return super(InterConsultasUpdateView, self).get_success_url()
class InterConsultasDeleteView(LoginRequiredMixin, InterConsultasMixin, DeleteView):
    """Delete an inter-consultation."""
    model = InterConsultas
    success_msg = 'La inter consulta se eliminó con éxito.'
| bsd-3-clause |
mihneadb/suse_bug_reporter | bugreporter/gathering_modules/cmdline.py | 1 | 1128 | '''
Copyright (C) 2011 Mihnea Dobrescu-Balaur
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from subprocess import Popen, PIPE
def gather_from_cmdline():
    ''' returns a list of strings which represent the kernel boot arguments '''
    # Read /proc/cmdline directly instead of spawning `cat` in a subprocess:
    # same bytes, no fork/exec, no pipe plumbing.
    with open('/proc/cmdline') as cmdline_file:
        output = cmdline_file.read()
    # Tokenize, then drop the first three tokens as the original did.
    # NOTE(review): the meaning of the first three entries is not shown here;
    # behaviour is preserved, but verify the slice against real cmdlines.
    output = output.split()
    return output[3:]
if __name__ == '__main__':
    # Manual smoke test: print the gathered kernel boot arguments.
    # print() works on Python 2 (as a parenthesised expression) and Python 3,
    # unlike the original Python-2-only `print test` statement.
    test = gather_from_cmdline()
    print(test)
| gpl-2.0 |
alajara/servo | python/servo/command_base.py | 1 | 24514 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from glob import glob
import gzip
import itertools
import locale
import os
from os import path
import contextlib
import subprocess
from subprocess import PIPE
import sys
import tarfile
from mach.registrar import Registrar
import toml
from servo.packages import WINDOWS_MSVC as msvc_deps
from servo.util import host_triple, host_platform
# Executable suffix for the host platform (".exe" on Windows, "" elsewhere).
BIN_SUFFIX = ".exe" if sys.platform == "win32" else ""
@contextlib.contextmanager
def cd(new_path):
    """Temporarily switch the process working directory to *new_path*,
    restoring the previous directory on exit (even on error)."""
    saved_path = os.getcwd()
    try:
        os.chdir(new_path)
        yield
    finally:
        os.chdir(saved_path)
@contextlib.contextmanager
def setlocale(name):
    """Temporarily switch LC_ALL to *name*, yielding the applied locale and
    restoring the previous locale on exit (even on error)."""
    original = locale.setlocale(locale.LC_ALL)
    try:
        yield locale.setlocale(locale.LC_ALL, name)
    finally:
        locale.setlocale(locale.LC_ALL, original)
def find_dep_path_newest(package, bin_path):
    """Return the most recently built cargo build directory for *package*
    next to *bin_path*, or None when no completed build exists.

    A candidate counts only if its "output" marker file is present; recency
    is judged by that marker's mtime.
    """
    deps_path = path.join(path.split(bin_path)[0], "build")
    candidates = []
    with cd(deps_path):
        for entry in glob(package + '-*'):
            full_path = path.join(deps_path, entry)
            if path.exists(path.join(full_path, "output")):
                candidates.append(full_path)
    if not candidates:
        return None
    return max(candidates, key=lambda c: path.getmtime(path.join(c, "output")))
def get_browserhtml_path(binary_path):
    """Return the browserhtml package's "out" directory for the given Servo
    binary, aborting the process when the package has not been built."""
    browserhtml_path = find_dep_path_newest('browserhtml', binary_path)
    if not browserhtml_path:
        sys.exit("Could not find browserhtml package; perhaps you haven't built Servo.")
    return path.join(browserhtml_path, "out")
def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):
    """Create a .tar.gz archive in a deterministic (reproducible) manner.
    See https://reproducible-builds.org/docs/archives/ for more details."""
    def reset(tarinfo):
        """Helper to reset owner/group and modification time for tar entries"""
        tarinfo.uid = tarinfo.gid = 0
        tarinfo.uname = tarinfo.gname = "root"
        tarinfo.mtime = 0
        return tarinfo
    dest_archive = os.path.abspath(dest_archive)
    with cd(dir_to_archive):
        current_dir = "."
        file_list = [current_dir]
        for root, dirs, files in os.walk(current_dir):
            for name in itertools.chain(dirs, files):
                file_list.append(os.path.join(root, name))
        # Sort file entries with the fixed locale
        # ('C' strcoll gives a byte-wise, host-independent ordering;
        # list.sort(cmp=...) is Python-2-only syntax).
        with setlocale('C'):
            file_list.sort(cmp=locale.strcoll)
        # Use a temporary file and atomic rename to avoid partially-formed
        # packaging (in case of exceptional situations like running out of disk space).
        # TODO do this in a temporary folder after #11983 is fixed
        temp_file = '{}.temp~'.format(dest_archive)
        # 0644 is a Python 2 octal literal; GzipFile(mtime=0) pins the gzip
        # header timestamp so identical inputs yield identical archives.
        with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0644), 'w') as out_file:
            with gzip.GzipFile('wb', fileobj=out_file, mtime=0) as gzip_file:
                with tarfile.open(fileobj=gzip_file, mode='w:') as tar_file:
                    for entry in file_list:
                        arcname = entry
                        if prepend_path is not None:
                            arcname = os.path.normpath(os.path.join(prepend_path, arcname))
                        tar_file.add(entry, filter=reset, recursive=False, arcname=arcname)
        os.rename(temp_file, dest_archive)
def normalize_env(env):
    """Return a copy of *env* with any unicode keys/values encoded to UTF-8
    byte strings.

    subprocess (on Python 2) mishandles unicode in environment variable
    dicts, so everything is forced to binary; utf-8 is the globally assumed
    default encoding. Callers wanting another encoding must not pass unicode.
    """
    def _to_bytes(value):
        if isinstance(value, unicode):
            return value.encode('utf-8', 'strict')
        return value
    return dict((_to_bytes(k), _to_bytes(v)) for k, v in env.items())
def call(*args, **kwargs):
    """Wrap `subprocess.call`, printing the command first when verbose=True
    and normalizing any unicode in a passed `env`."""
    if kwargs.pop('verbose', False):
        print(' '.join(args[0]))
    if 'env' in kwargs:
        kwargs['env'] = normalize_env(kwargs['env'])
    # shell=True is required on Windows so PATH is searched for the binary.
    return subprocess.call(*args, shell=sys.platform == 'win32', **kwargs)
def check_call(*args, **kwargs):
    """Run a command like `subprocess.check_call`, printing it if
    verbose=True and normalizing any unicode-containing `env`.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    verbose = kwargs.pop('verbose', False)
    if 'env' in kwargs:
        kwargs['env'] = normalize_env(kwargs['env'])
    if verbose:
        print(' '.join(args[0]))
    # shell=True is required on Windows so PATH is searched for the binary.
    proc = subprocess.Popen(*args, shell=sys.platform == 'win32', **kwargs)
    # Let the subprocess handle Ctrl+C itself: if it terminates because of
    # Ctrl+C, wait() returns a status and we stop; if it swallows the signal
    # (like gdb), KeyboardInterrupt is ignored here and we keep waiting.
    exit_status = None
    while exit_status is None:
        try:
            exit_status = proc.wait()
        except KeyboardInterrupt:
            pass
    if exit_status:
        raise subprocess.CalledProcessError(exit_status, ' '.join(*args))
def is_windows():
    """Return True when running on Windows."""
    return sys.platform == 'win32'
def is_macosx():
    """Return True when running on macOS."""
    return sys.platform == 'darwin'
def is_linux():
    """Return True when running on any Linux platform."""
    return sys.platform.startswith('linux')
def set_osmesa_env(bin_path, env):
    """Set proper LD_LIBRARY_PATH and DRIVE for software rendering on Linux and OSX"""
    if is_linux():
        # Locate the most recent osmesa-src build next to the binary.
        dep_path = find_dep_path_newest('osmesa-src', bin_path)
        if not dep_path:
            # No osmesa build available: signal failure to the caller.
            return None
        osmesa_path = path.join(dep_path, "out", "lib", "gallium")
        env["LD_LIBRARY_PATH"] = osmesa_path
        env["GALLIUM_DRIVER"] = "softpipe"
    elif is_macosx():
        # NOTE(review): unlike the Linux branch, find_dep_path_newest() is not
        # checked for None before path.join here; a missing osmesa build would
        # raise instead of returning None — confirm whether that is intended.
        osmesa_path = path.join(find_dep_path_newest('osmesa-src', bin_path),
                                "out", "src", "gallium", "targets", "osmesa", ".libs")
        glapi_path = path.join(find_dep_path_newest('osmesa-src', bin_path),
                               "out", "src", "mapi", "shared-glapi", ".libs")
        if not (osmesa_path and glapi_path):
            return None
        env["DYLD_LIBRARY_PATH"] = osmesa_path + ":" + glapi_path
        env["GALLIUM_DRIVER"] = "softpipe"
    return env
class BuildNotFound(Exception):
    """Raised when neither a release nor a dev Servo binary exists on disk."""
    def __init__(self, message):
        self.message = message
    def __str__(self):
        return self.message
class CommandBase(object):
    """Base class for mach command providers.
    This mostly handles configuration management, such as .servobuild."""
    def __init__(self, context):
        self.context = context
        def get_env_bool(var, default):
            # Contents of env vars are strings by default. This returns the
            # boolean value of the specified environment variable, or the
            # specified default if the var doesn't contain True or False
            return {'True': True, 'False': False}.get(os.environ.get(var), default)
        def resolverelative(category, key):
            # Allow ~
            self.config[category][key] = path.expanduser(self.config[category][key])
            # Resolve relative paths
            self.config[category][key] = path.join(context.topdir,
                                                   self.config[category][key])
        if not hasattr(self.context, "bootstrapped"):
            self.context.bootstrapped = False
        # Load the optional .servobuild TOML config from the tree root.
        config_path = path.join(context.topdir, ".servobuild")
        if path.exists(config_path):
            with open(config_path) as f:
                self.config = toml.loads(f.read())
        else:
            self.config = {}
        # Handle missing/default items
        self.config.setdefault("tools", {})
        default_cache_dir = os.environ.get("SERVO_CACHE_DIR",
                                           path.join(context.topdir, ".servo"))
        self.config["tools"].setdefault("cache-dir", default_cache_dir)
        resolverelative("tools", "cache-dir")
        default_cargo_home = os.environ.get("CARGO_HOME",
                                            path.join(context.topdir, ".cargo"))
        self.config["tools"].setdefault("cargo-home-dir", default_cargo_home)
        resolverelative("tools", "cargo-home-dir")
        context.sharedir = self.config["tools"]["cache-dir"]
        self.config["tools"].setdefault("system-rust", False)
        self.config["tools"].setdefault("system-cargo", False)
        self.config["tools"].setdefault("rust-root", "")
        self.config["tools"].setdefault("cargo-root", "")
        self.config["tools"].setdefault("rustc-with-gold", get_env_bool("SERVO_RUSTC_WITH_GOLD", True))
        # https://github.com/rust-lang/rust/pull/39754
        platforms_with_rustc_alt_builds = ["unknown-linux-gnu", "apple-darwin", "pc-windows-msvc"]
        llvm_assertions_default = ("SERVO_RUSTC_LLVM_ASSERTIONS" in os.environ
                                   or host_platform() not in platforms_with_rustc_alt_builds)
        self.config.setdefault("build", {})
        self.config["build"].setdefault("android", False)
        self.config["build"].setdefault("mode", "")
        self.config["build"].setdefault("llvm-assertions", llvm_assertions_default)
        self.config["build"].setdefault("debug-mozjs", False)
        self.config["build"].setdefault("ccache", "")
        self.config["build"].setdefault("rustflags", "")
        self.config["build"].setdefault("incremental", False)
        self.config.setdefault("android", {})
        self.config["android"].setdefault("sdk", "")
        self.config["android"].setdefault("ndk", "")
        self.config["android"].setdefault("toolchain", "")
        self.config["android"].setdefault("platform", "android-18")
        self.config["android"].setdefault("target", "arm-linux-androideabi")
        self.set_cargo_root()
        self.set_use_stable_rust(False)
    # Cached toolchain state shared by the rust/cargo helpers below.
    _use_stable_rust = False
    _rust_version = None
    _rust_version_is_stable = False
    _cargo_build_id = None
    def set_cargo_root(self):
        # Point cargo-root into the shared cache unless a system cargo is used.
        if not self.config["tools"]["system-cargo"]:
            self.config["tools"]["cargo-root"] = path.join(
                self.context.sharedir, "cargo", self.cargo_build_id())
    def set_use_stable_rust(self, use_stable_rust=True):
        """Select the stable vs nightly rust channel and update rust-root."""
        self._use_stable_rust = use_stable_rust
        if not self.config["tools"]["system-rust"]:
            self.config["tools"]["rust-root"] = path.join(
                self.context.sharedir, "rust", self.rust_path())
        if use_stable_rust:
            # Cargo maintainer's position is that CARGO_INCREMENTAL is a nightly-only feature
            # and should not be used on the stable channel.
            # https://github.com/rust-lang/cargo/issues/3835
            self.config["build"]["incremental"] = False
    def use_stable_rust(self):
        return self._use_stable_rust
    def rust_path(self):
        """Return the relative cache path of the rustc build to use."""
        version = self.rust_version()
        if self._use_stable_rust:
            return os.path.join(version, "rustc-%s-%s" % (version, host_triple()))
        if not self.config["build"]["llvm-assertions"]:
            # "-alt" builds are rustc nightlies compiled without LLVM assertions.
            version += "-alt"
        return os.path.join(version, "rustc-nightly-%s" % (host_triple()))
    def rust_version(self):
        # Lazily read the pinned rust version/commit from the tree; re-read
        # when the channel selection changed since the cached read.
        # NOTE(review): _rust_version_is_stable is never updated here, so when
        # the stable channel is active the file is re-read on every call —
        # confirm whether that cache invalidation is intentional.
        if self._rust_version is None or self._use_stable_rust != self._rust_version_is_stable:
            filename = path.join(self.context.topdir,
                                 "rust-stable-version" if self._use_stable_rust else "rust-commit-hash")
            with open(filename) as f:
                self._rust_version = f.read().strip()
        return self._rust_version
    def cargo_build_id(self):
        # Lazily read the pinned cargo commit hash from the tree.
        if self._cargo_build_id is None:
            filename = path.join(self.context.topdir, "cargo-commit-hash")
            with open(filename) as f:
                self._cargo_build_id = f.read().strip()
        return self._cargo_build_id
    def get_top_dir(self):
        """Return the root of the Servo checkout."""
        return self.context.topdir
    def get_target_dir(self):
        """Return the cargo target directory (honouring CARGO_TARGET_DIR)."""
        if "CARGO_TARGET_DIR" in os.environ:
            return os.environ["CARGO_TARGET_DIR"]
        else:
            return path.join(self.context.topdir, "target")
    def get_binary_path(self, release, dev, android=False):
        """Return the path of the built Servo binary for the requested profile.

        Prefers release when both flags are set; raises BuildNotFound when
        nothing is built, and exits the process on an ambiguous or missing
        profile request.
        """
        # TODO(autrilla): this function could still use work - it shouldn't
        # handle quitting, or printing. It should return the path, or an error.
        base_path = self.get_target_dir()
        if android:
            base_path = path.join(base_path, self.config["android"]["target"])
        binary_name = "servo" + BIN_SUFFIX
        release_path = path.join(base_path, "release", binary_name)
        dev_path = path.join(base_path, "debug", binary_name)
        # Prefer release if both given
        if release and dev:
            dev = False
        release_exists = path.exists(release_path)
        dev_exists = path.exists(dev_path)
        if not release_exists and not dev_exists:
            raise BuildNotFound('No Servo binary found.'
                                ' Perhaps you forgot to run `./mach build`?')
        if release and release_exists:
            return release_path
        if dev and dev_exists:
            return dev_path
        if not dev and not release and release_exists and dev_exists:
            print("You have multiple profiles built. Please specify which "
                  "one to run with '--release' or '--dev'.")
            sys.exit()
        if not dev and not release:
            if release_exists:
                return release_path
            else:
                return dev_path
        print("The %s profile is not built. Please run './mach build%s' "
              "and try again." % ("release" if release else "dev",
                                  " --release" if release else ""))
        sys.exit()
    def build_env(self, hosts_file_path=None, target=None, is_build=False, geckolib=False):
        """Return an extended environment dictionary."""
        env = os.environ.copy()
        if sys.platform == "win32" and type(env['PATH']) == unicode:
            # On win32, the virtualenv's activate_this.py script sometimes ends up
            # turning os.environ['PATH'] into a unicode string. This doesn't work
            # for passing env vars in to a process, so we force it back to ascii.
            # We don't use UTF8 since that won't be correct anyway; if you actually
            # have unicode stuff in your path, all this PATH munging would have broken
            # it in any case.
            env['PATH'] = env['PATH'].encode('ascii', 'ignore')
        extra_path = []
        extra_lib = []
        if "msvc" in (target or host_triple()):
            msvc_x64 = "64" if "x86_64" in (target or host_triple()) else ""
            msvc_deps_dir = path.join(self.context.sharedir, "msvc-dependencies")
            def package_dir(package):
                return path.join(msvc_deps_dir, package, msvc_deps[package])
            extra_path += [path.join(package_dir("cmake"), "bin")]
            extra_path += [path.join(package_dir("ninja"), "bin")]
            # Link openssl
            env["OPENSSL_INCLUDE_DIR"] = path.join(package_dir("openssl"), "include")
            env["OPENSSL_LIB_DIR"] = path.join(package_dir("openssl"), "lib" + msvc_x64)
            env["OPENSSL_LIBS"] = "libsslMD:libcryptoMD"
            # Link moztools
            env["MOZTOOLS_PATH"] = path.join(package_dir("moztools"), "bin")
        if is_windows():
            if not os.environ.get("NATIVE_WIN32_PYTHON"):
                env["NATIVE_WIN32_PYTHON"] = sys.executable
            # Always build harfbuzz from source
            env["HARFBUZZ_SYS_NO_PKG_CONFIG"] = "true"
        if not self.config["tools"]["system-rust"] \
                or self.config["tools"]["rust-root"]:
            env["RUST_ROOT"] = self.config["tools"]["rust-root"]
            # These paths are for when rust-root points to an unpacked installer
            extra_path += [path.join(self.config["tools"]["rust-root"], "rustc", "bin")]
            extra_lib += [path.join(self.config["tools"]["rust-root"], "rustc", "lib")]
            # These paths are for when rust-root points to a rustc sysroot
            extra_path += [path.join(self.config["tools"]["rust-root"], "bin")]
            extra_lib += [path.join(self.config["tools"]["rust-root"], "lib")]
        if not self.config["tools"]["system-cargo"] \
                or self.config["tools"]["cargo-root"]:
            # This path is for when rust-root points to an unpacked installer
            extra_path += [
                path.join(self.config["tools"]["cargo-root"], "cargo", "bin")]
            # This path is for when rust-root points to a rustc sysroot
            extra_path += [
                path.join(self.config["tools"]["cargo-root"], "bin")]
        if extra_path:
            env["PATH"] = "%s%s%s" % (os.pathsep.join(extra_path), os.pathsep, env["PATH"])
        env["CARGO_HOME"] = self.config["tools"]["cargo-home-dir"]
        if self.config["build"]["incremental"]:
            env["CARGO_INCREMENTAL"] = "1"
        if extra_lib:
            if sys.platform == "darwin":
                env["DYLD_LIBRARY_PATH"] = "%s%s%s" % \
                                           (os.pathsep.join(extra_lib),
                                            os.pathsep,
                                            env.get("DYLD_LIBRARY_PATH", ""))
            else:
                env["LD_LIBRARY_PATH"] = "%s%s%s" % \
                                         (os.pathsep.join(extra_lib),
                                          os.pathsep,
                                          env.get("LD_LIBRARY_PATH", ""))
        # Paths to Android build tools:
        if self.config["android"]["sdk"]:
            env["ANDROID_SDK"] = self.config["android"]["sdk"]
        if self.config["android"]["ndk"]:
            env["ANDROID_NDK"] = self.config["android"]["ndk"]
        if self.config["android"]["toolchain"]:
            env["ANDROID_TOOLCHAIN"] = self.config["android"]["toolchain"]
        if self.config["android"]["platform"]:
            env["ANDROID_PLATFORM"] = self.config["android"]["platform"]
        # These are set because they are the variable names that build-apk
        # expects. However, other submodules have makefiles that reference
        # the env var names above. Once glutin is enabled and set as the
        # default, we could modify the subproject makefiles to use the names
        # below and remove the vars above, to avoid duplication.
        if "ANDROID_SDK" in env:
            env["ANDROID_HOME"] = env["ANDROID_SDK"]
        if "ANDROID_NDK" in env:
            env["NDK_HOME"] = env["ANDROID_NDK"]
        if "ANDROID_TOOLCHAIN" in env:
            env["NDK_STANDALONE"] = env["ANDROID_TOOLCHAIN"]
        if hosts_file_path:
            env['HOST_FILE'] = hosts_file_path
        env['RUSTDOC'] = path.join(self.context.topdir, 'etc', 'rustdoc-with-private')
        if self.config["build"]["rustflags"]:
            env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " " + self.config["build"]["rustflags"]
        # Don't run the gold linker if on Windows https://github.com/servo/servo/issues/9499
        if self.config["tools"]["rustc-with-gold"] and sys.platform != "win32":
            if subprocess.call(['which', 'ld.gold'], stdout=PIPE, stderr=PIPE) == 0:
                env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C link-args=-fuse-ld=gold"
        if not (self.config["build"]["ccache"] == ""):
            env['CCACHE'] = self.config["build"]["ccache"]
        # Ensure Rust uses hard floats and SIMD on ARM devices
        if target:
            if target.startswith('arm') or target.startswith('aarch64'):
                env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C target-feature=+neon"
        env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -W unused-extern-crates"
        # Embed the git revision (and dirty marker) for `servo --version`-style
        # reporting; only done for actual builds inside a git checkout.
        git_info = []
        if os.path.isdir('.git') and is_build:
            git_sha = subprocess.check_output([
                'git', 'rev-parse', '--short', 'HEAD'
            ]).strip()
            git_is_dirty = bool(subprocess.check_output([
                'git', 'status', '--porcelain'
            ]).strip())
            git_info.append('')
            git_info.append(git_sha)
            if git_is_dirty:
                git_info.append('dirty')
        env['GIT_INFO'] = '-'.join(git_info)
        if geckolib:
            # geckolib builds into its own target dir to avoid clobbering servo's.
            geckolib_build_path = path.join(self.context.topdir, "target", "geckolib").encode("UTF-8")
            env["CARGO_TARGET_DIR"] = geckolib_build_path
        return env
    def servo_crate(self):
        """Return the path of the servo port crate."""
        return path.join(self.context.topdir, "ports", "servo")
    def servo_features(self):
        """Return a list of optional features to enable for the Servo crate"""
        features = []
        if self.config["build"]["debug-mozjs"]:
            features += ["debugmozjs"]
        return features
    def android_support_dir(self):
        """Return the path of the Android support files."""
        return path.join(self.context.topdir, "support", "android")
    def android_build_dir(self, dev):
        """Return the Android target output directory for the given profile."""
        return path.join(self.get_target_dir(), "arm-linux-androideabi", "debug" if dev else "release")
    def ensure_bootstrapped(self, target=None):
        """Install the pinned rust/cargo toolchains (and MSVC deps) if missing."""
        if self.context.bootstrapped:
            return
        target_platform = target or host_triple()
        rust_root = self.config["tools"]["rust-root"]
        rustc_path = path.join(
            rust_root, "rustc", "bin", "rustc" + BIN_SUFFIX
        )
        rustc_binary_exists = path.exists(rustc_path)
        base_target_path = path.join(rust_root, "rustc", "lib", "rustlib")
        target_path = path.join(base_target_path, target_platform)
        target_exists = path.exists(target_path)
        # Always check if all needed MSVC dependencies are installed
        if "msvc" in target_platform:
            Registrar.dispatch("bootstrap", context=self.context)
        if not (self.config['tools']['system-rust'] or (rustc_binary_exists and target_exists)):
            print("looking for rustc at %s" % (rustc_path))
            Registrar.dispatch("bootstrap-rust", context=self.context, target=filter(None, [target]),
                               stable=self._use_stable_rust)
        cargo_path = path.join(self.config["tools"]["cargo-root"], "cargo", "bin",
                               "cargo" + BIN_SUFFIX)
        cargo_binary_exists = path.exists(cargo_path)
        if not self.config["tools"]["system-cargo"] and not cargo_binary_exists:
            Registrar.dispatch("bootstrap-cargo", context=self.context)
        self.context.bootstrapped = True
    def ensure_clobbered(self, target_dir=None):
        """Clean the target directory when the tree's CLOBBER file is newer
        than the target's marker file and AUTOCLOBBER is set in the environment."""
        if target_dir is None:
            target_dir = self.get_target_dir()
        auto = True if os.environ.get('AUTOCLOBBER', False) else False
        src_clobber = os.path.join(self.context.topdir, 'CLOBBER')
        target_clobber = os.path.join(target_dir, 'CLOBBER')
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        if not os.path.exists(target_clobber):
            # Simply touch the file.
            with open(target_clobber, 'a'):
                pass
        if auto:
            if os.path.getmtime(src_clobber) > os.path.getmtime(target_clobber):
                print('Automatically clobbering target directory: {}'.format(target_dir))
                try:
                    Registrar.dispatch("clean", context=self.context, verbose=True)
                    print('Successfully completed auto clobber.')
                except subprocess.CalledProcessError as error:
                    sys.exit(error)
            else:
                print("Clobber not needed.")
| mpl-2.0 |
fossoult/odoo | openerp/addons/test_impex/tests/test_export.py | 158 | 19124 | # -*- coding: utf-8 -*-
import itertools
import openerp.modules.registry
import openerp
from openerp.tests import common
class CreatorCase(common.TransactionCase):
    """Common scaffolding for export tests.

    Subclasses set :attr:`model_name` to a model exposing a single ``value``
    field; :meth:`export` creates a record holding ``value`` and returns the
    rows produced by the private export machinery.
    """
    # overridden by each subclass with the name of the model under test
    model_name = False
    def __init__(self, *args, **kwargs):
        super(CreatorCase, self).__init__(*args, **kwargs)
        self.model = None
    def setUp(self):
        super(CreatorCase, self).setUp()
        # resolve the model once per test from the registry
        self.model = self.registry(self.model_name)
    def make(self, value):
        """Create a record with the given ``value`` and return its browse record."""
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': value})
        return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
    def export(self, value, fields=('value',), context=None):
        """Create a record and export ``fields`` ('/'-separated subfield paths).

        NOTE(review): ``context`` is accepted but never used by this body —
        confirm whether callers passing a context actually expect an effect.
        """
        record = self.make(value)
        return record._BaseModel__export_rows([f.split('/') for f in fields])
class test_boolean_field(CreatorCase):
    """Exports of boolean fields."""
    model_name = 'export.boolean'
    def test_true(self):
        """A true boolean is exported as the unicode string 'True'."""
        rows = self.export(True)
        self.assertEqual(rows, [[u'True']])
    def test_false(self):
        """Unlike other field types, a false boolean is exported as the
        unicode string 'False' rather than as the boolean False.
        """
        rows = self.export(False)
        self.assertEqual(rows, [[u'False']])
class test_integer_field(CreatorCase):
    """Exports of integer fields."""
    model_name = 'export.integer'
    def test_empty(self):
        """Sanity check: the test model starts with no records."""
        self.assertEqual(self.model.search(self.cr, openerp.SUPERUSER_ID, []), [],
            "Test model should have no records")
    def test_0(self):
        # 0 is falsy and is exported as False, not as u'0'
        self.assertEqual(
            self.export(0),
            [[False]])
    def test_basic_value(self):
        self.assertEqual(
            self.export(42),
            [[u'42']])
    def test_negative(self):
        self.assertEqual(
            self.export(-32),
            [[u'-32']])
    def test_huge(self):
        # largest 32-bit signed integer exports as its decimal string
        self.assertEqual(
            self.export(2**31-1),
            [[unicode(2**31-1)]])
class test_float_field(CreatorCase):
    """Exports of float fields."""
    model_name = 'export.float'
    def test_0(self):
        # 0.0 is falsy and is exported as False
        self.assertEqual(
            self.export(0.0),
            [[False]])
    def test_epsilon(self):
        # tiny values keep their scientific-notation representation
        self.assertEqual(
            self.export(0.000000000027),
            [[u'2.7e-11']])
    def test_negative(self):
        self.assertEqual(
            self.export(-2.42),
            [[u'-2.42']])
    def test_positive(self):
        self.assertEqual(
            self.export(47.36),
            [[u'47.36']])
    def test_big(self):
        self.assertEqual(
            self.export(87654321.4678),
            [[u'87654321.4678']])
class test_decimal_field(CreatorCase):
    """Exports of decimal (precision-limited float) fields."""
    model_name = 'export.decimal'
    def test_0(self):
        # 0.0 is falsy and is exported as False
        self.assertEqual(
            self.export(0.0),
            [[False]])
    def test_epsilon(self):
        """ epsilon gets sliced to 0 due to precision
        """
        self.assertEqual(
            self.export(0.000000000027),
            [[False]])
    def test_negative(self):
        self.assertEqual(
            self.export(-2.42),
            [[u'-2.42']])
    def test_positive(self):
        self.assertEqual(
            self.export(47.36),
            [[u'47.36']])
    def test_big(self):
        # expected value implies rounding to 3 decimal places by the field
        self.assertEqual(
            self.export(87654321.4678), [[u'87654321.468']])
class test_string_field(CreatorCase):
    """Exports of size-bounded char fields."""
    model_name = 'export.string.bounded'
    def test_empty(self):
        # the empty string is falsy and exports as False
        self.assertEqual(
            self.export(""),
            [[False]])
    def test_within_bounds(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])
    def test_out_of_bounds(self):
        # expected value implies the field truncates input to 16 characters
        self.assertEqual(
            self.export("C for Sinking, "
                        "Java for Drinking, "
                        "Smalltalk for Thinking. "
                        "...and Power to the Penguin!"),
            [[u"C for Sinking, J"]])
class test_unbound_string_field(CreatorCase):
    """Exports of unlimited-size char fields (no truncation)."""
    model_name = 'export.string'
    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [[False]])
    def test_small(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])
    def test_big(self):
        # long values are exported in full
        self.assertEqual(
            self.export("We flew down weekly to meet with IBM, but they "
                        "thought the way to measure software was the amount "
                        "of code we wrote, when really the better the "
                        "software, the fewer lines of code."),
            [[u"We flew down weekly to meet with IBM, but they thought the "
              u"way to measure software was the amount of code we wrote, "
              u"when really the better the software, the fewer lines of "
              u"code."]])
class test_text(CreatorCase):
    """Exports of text fields."""
    model_name = 'export.text'
    def test_empty(self):
        self.assertEqual(
            self.export(""),
            [[False]])
    def test_small(self):
        self.assertEqual(
            self.export("foobar"),
            [[u"foobar"]])
    def test_big(self):
        # long values are exported in full
        self.assertEqual(
            self.export("So, `bind' is `let' and monadic programming is"
                        " equivalent to programming in the A-normal form. That"
                        " is indeed all there is to monads"),
            [[u"So, `bind' is `let' and monadic programming is equivalent to"
              u" programming in the A-normal form. That is indeed all there"
              u" is to monads"]])
class test_date(CreatorCase):
    """Exports of date fields."""
    model_name = 'export.date'
    def test_empty(self):
        """An unset date is exported as False."""
        rows = self.export(False)
        self.assertEqual(rows, [[False]])
    def test_basic(self):
        """A date is exported unchanged, as its YYYY-MM-DD string."""
        rows = self.export('2011-11-07')
        self.assertEqual(rows, [[u'2011-11-07']])
class test_datetime(CreatorCase):
    """Exports of datetime fields."""
    model_name = 'export.datetime'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_basic(self):
        self.assertEqual(
            self.export('2011-11-07 21:05:48'),
            [[u'2011-11-07 21:05:48']])
    def test_tz(self):
        """ Export ignores the timezone and always exports to UTC
        .. note:: on the other hand, export uses user lang for name_get
        """
        # NOTE: ignores user timezone, always exports to UTC
        self.assertEqual(
            self.export('2011-11-07 21:05:48', context={'tz': 'Pacific/Norfolk'}),
            [[u'2011-11-07 21:05:48']])
class test_selection(CreatorCase):
    """Exports of selection fields defined by a static option list."""
    model_name = 'export.selection'
    # (source label, French translation) pairs installed by
    # test_localized_export
    translations_fr = [
        ("Qux", "toto"),
        ("Bar", "titi"),
        ("Foo", "tete"),
    ]
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_value(self):
        """ selections export the *label* for their value
        """
        self.assertEqual(
            self.export(2),
            [[u"Bar"]])
    def test_localized_export(self):
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        # NOTE(review): the untranslated label is still expected even with a
        # fr_FR context — consistent with CreatorCase.export ignoring the
        # context argument; confirm this is the intended behaviour under test
        self.assertEqual(
            self.export(2, context={'lang': 'fr_FR'}),
            [[u'Bar']])
class test_selection_function(CreatorCase):
    """Exports of selection fields whose options come from a function."""
    model_name = 'export.selection.function'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_value(self):
        # FIXME: selection functions export the *value* itself
        self.assertEqual(
            self.export(1),
            [[1]])
        self.assertEqual(
            self.export(3),
            [[3]])
        # 0 is falsy, so it exports as False rather than as its value
        self.assertEqual(
            self.export(0),
            [[False]])
class test_m2o(CreatorCase):
    """Exports of many2one fields."""
    model_name = 'export.many2one'
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_basic(self):
        """ Exported value is the name_get of the related object
        """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id]))[integer_id]
        self.assertEqual(
            self.export(integer_id),
            [[name]])
    def test_path(self):
        """ Can recursively export fields of m2o via path
        """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        self.assertEqual(
            self.export(integer_id, fields=['value/.id', 'value/value']),
            [[unicode(integer_id), u'42']])
    def test_external_id(self):
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        # Expecting the m2o target model name in the external id,
        # not this model's name
        external_id = u'__export__.export_integer_%d' % integer_id
        self.assertEqual(
            self.export(integer_id, fields=['value/id']),
            [[external_id]])
class test_o2m(CreatorCase):
    """Exports of one2many fields: the first linked record integrates into
    the parent's row, further linked records get continuation rows.
    """
    model_name = 'export.one2many'
    # creation commands for the child records
    commands = [
        (0, False, {'value': 4, 'str': 'record1'}),
        (0, False, {'value': 42, 'str': 'record2'}),
        (0, False, {'value': 36, 'str': 'record3'}),
        (0, False, {'value': 4, 'str': 'record4'}),
        (0, False, {'value': 13, 'str': 'record5'}),
    ]
    # expected name_get results for the children created by ``commands``
    names = [
        u'export.one2many.child:%d' % d['value']
        for c, _, d in commands
    ]
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_single(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})]),
            # name_get result
            [[u'export.one2many.child:42']])
    def test_single_subfield(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['value', 'value/value']),
            [[u'export.one2many.child:42', u'42']])
    def test_integrate_one_in_parent(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['const', 'value/value']),
            [[u'4', u'42']])
    def test_multiple_records(self):
        # parent columns are only filled on the first row
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value']),
            [
                [u'4', u'4'],
                [u'', u'42'],
                [u'', u'36'],
                [u'', u'4'],
                [u'', u'13'],
            ])
    def test_multiple_records_name(self):
        # without subfields, all children collapse into one comma-joined cell
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value']),
            [[
                u'4', u','.join(self.names)
            ]])
    def test_multiple_records_id(self):
        export = self.export(self.commands, fields=['const', 'value/.id'])
        O2M_c = self.registry('export.one2many.child')
        ids = O2M_c.browse(self.cr, openerp.SUPERUSER_ID,
                           O2M_c.search(self.cr, openerp.SUPERUSER_ID, []))
        self.assertEqual(
            export,
            [
                ['4', str(ids[0].id)],
                ['', str(ids[1].id)],
                ['', str(ids[2].id)],
                ['', str(ids[3].id)],
                ['', str(ids[4].id)],
            ])
    def test_multiple_records_with_name_before(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value', 'value/value']),
            [[ # exports sub-fields of very first o2m
                u'4', u','.join(self.names), u'4'
            ]])
    def test_multiple_records_with_name_after(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value', 'value']),
            [ # completely ignores name_get request
                [u'4', u'4', ''],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
            ])
    def test_multiple_subfields_neighbour(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/str','value/value']),
            [
                [u'4', u'record1', u'4'],
                ['', u'record2', u'42'],
                ['', u'record3', u'36'],
                ['', u'record4', u'4'],
                ['', u'record5', u'13'],
            ])
    def test_multiple_subfields_separated(self):
        self.assertEqual(
            self.export(self.commands, fields=['value/str', 'const', 'value/value']),
            [
                [u'record1', u'4', u'4'],
                [u'record2', '', u'42'],
                [u'record3', '', u'36'],
                [u'record4', '', u'4'],
                [u'record5', '', u'13'],
            ])
class test_o2m_multiple(CreatorCase):
    """Exports of records holding two one2many fields (child1, child2)."""
    model_name = 'export.one2many.multiple'
    def make(self, value=None, **values):
        """Create a record from arbitrary field values (``value`` optional)."""
        if value is not None: values['value'] = value
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, values)
        return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
    def export(self, value=None, fields=('child1', 'child2',), context=None, **values):
        """Create a record and export ``fields`` for it."""
        record = self.make(value, **values)
        return record._BaseModel__export_rows([f.split('/') for f in fields])
    def test_empty(self):
        self.assertEqual(
            self.export(child1=False, child2=False),
            [[False, False]])
    def test_single_per_side(self):
        self.assertEqual(
            self.export(child1=False, child2=[(0, False, {'value': 42})]),
            [[False, u'export.one2many.child.2:42']])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})], child2=False),
            [[u'export.one2many.child.1:43', False]])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})],
                        child2=[(0, False, {'value': 42})]),
            [[u'export.one2many.child.1:43', u'export.one2many.child.2:42']])
    def test_single_integrate_subfield(self):
        fields = ['const', 'child1/value', 'child2/value']
        self.assertEqual(
            self.export(child1=False, child2=[(0, False, {'value': 42})],
                        fields=fields),
            [[u'36', False, u'42']])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})], child2=False,
                        fields=fields),
            [[u'36', u'43', False]])
        self.assertEqual(
            self.export(child1=[(0, False, {'value': 43})],
                        child2=[(0, False, {'value': 42})],
                        fields=fields),
            [[u'36', u'43', u'42']])
    def test_multiple(self):
        """ With two "concurrent" o2ms, exports the first line combined, then
        exports the rows for the first o2m, then the rows for the second o2m.
        """
        fields = ['const', 'child1/value', 'child2/value']
        child1 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
                  for index, v in zip(itertools.count(), [4, 42, 36, 4, 13])]
        child2 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
                  for index, v in zip(itertools.count(10), [8, 12, 8, 55, 33, 13])]
        self.assertEqual(
            self.export(child1=child1, child2=False, fields=fields),
            [
                [u'36', u'4', False],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
            ])
        self.assertEqual(
            self.export(child1=False, child2=child2, fields=fields),
            [
                [u'36', False, u'8'],
                ['', '', u'12'],
                ['', '', u'8'],
                ['', '', u'55'],
                ['', '', u'33'],
                ['', '', u'13'],
            ])
        self.assertEqual(
            self.export(child1=child1, child2=child2, fields=fields),
            [
                [u'36', u'4', u'8'],
                ['', u'42', ''],
                ['', u'36', ''],
                ['', u'4', ''],
                ['', u'13', ''],
                ['', '', u'12'],
                ['', '', u'8'],
                ['', '', u'55'],
                ['', '', u'33'],
                ['', '', u'13'],
            ])
class test_m2m(CreatorCase):
    """Exports of many2many fields (same row mechanics as one2many)."""
    model_name = 'export.many2many'
    # creation commands for the linked records
    commands = [
        (0, False, {'value': 4, 'str': 'record000'}),
        (0, False, {'value': 42, 'str': 'record001'}),
        (0, False, {'value': 36, 'str': 'record010'}),
        (0, False, {'value': 4, 'str': 'record011'}),
        (0, False, {'value': 13, 'str': 'record100'}),
    ]
    # expected name_get results for the records created by ``commands``
    names = [
        u'export.many2many.other:%d' % d['value']
        for c, _, d in commands
    ]
    def test_empty(self):
        self.assertEqual(
            self.export(False),
            [[False]])
    def test_single(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})]),
            # name_get result
            [[u'export.many2many.other:42']])
    def test_single_subfield(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['value', 'value/value']),
            [[u'export.many2many.other:42', u'42']])
    def test_integrate_one_in_parent(self):
        self.assertEqual(
            self.export([(0, False, {'value': 42})],
                        fields=['const', 'value/value']),
            [[u'4', u'42']])
    def test_multiple_records(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value/value']),
            [
                [u'4', u'4'],
                [u'', u'42'],
                [u'', u'36'],
                [u'', u'4'],
                [u'', u'13'],
            ])
    def test_multiple_records_name(self):
        self.assertEqual(
            self.export(self.commands, fields=['const', 'value']),
            [[ # FIXME: hardcoded comma, import uses config.csv_internal_sep
               # resolution: remove configurable csv_internal_sep
                u'4', u','.join(self.names)
            ]])
    # remaining many2many behaviours mirror the one2many cases above
class test_function(CreatorCase):
    """Exports of function (computed) fields."""
    model_name = 'export.function'
    def test_value(self):
        """ Exports value normally returned by accessing the function field
        """
        # the computed result (u'3') is unrelated to the stored value 42
        self.assertEqual(
            self.export(42),
            [[u'3']])
| agpl-3.0 |
factorlibre/OCB | openerp/addons/base/tests/test_mail_examples.py | 302 | 57129 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Miscellaneous client-generated HTML: inline-styled fonts, lists,
# blockquotes, a plain link and a javascript: URI (sanitization fodder).
MISC_HTML_SOURCE = """
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello ${object.partner_id.name},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>${object.number}</strong><br />
Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br />
Invoice date: ${object.date_invoice}<br />
Order reference: ${object.origin}<br />
Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="${object.paypal_url}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing ${object.company_id.name or 'us'}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
${object.company_id.street}<br/>
${object.company_id.street2}<br/>
${object.company_id.zip} ${object.company_id.city}<br/>
${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: ${object.company_id.phone}
</div>
<div>
Web : <a href="${object.company_id.website}">${object.company_id.website}</a>
</div>
</div>
</div></body></html>"""
OERP_WEBSITE_HTML_1 = """
<div>
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb16" data-snippet-id="colmd">
<h2>OpenERP HR Features</h2>
<h3 class="text-muted">Manage your company most important asset: People</h3>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg">
<h4 class="mt16">Streamline Recruitments</h4>
<p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p>
<p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg">
<h4 class="mt16">Enterprise Social Network</h4>
<p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p>
<p>Interact with your collegues in real time with live chat.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg">
<h4 class="mt16">Leaves Management</h4>
<p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. The agenda of each employee is updated accordingly.</p>
</div>
</div>
</div>
</div>"""
OERP_WEBSITE_HTML_1_IN = [
'Manage your company most important asset: People',
'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"',
]
OERP_WEBSITE_HTML_1_OUT = [
'Break down information silos.',
'Keep track of the vacation days accrued by each employee',
'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg',
]
OERP_WEBSITE_HTML_2 = """
<div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text">
<section class="mt16 mb16" data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>
OpenERP Project Management
</h2>
<h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3>
</div>
<div class="col-md-12 mb16 mt16" data-snippet-id="colmd">
<p>
OpenERP's <b>collaborative and realtime</b> project
management helps your team get work done. Keep
track of everything, from the big picture to the
minute details, from the customer contract to the
billing.
</p><p>
Organize projects around <b>your own processes</b>. Work
on tasks and issues using the kanban view, schedule
tasks using the gantt chart and control deadlines
in the calendar view. Every project may have it's
own stages allowing teams to optimize their job.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="image-text">
<div class="container">
<div class="row">
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg">
</div>
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Manage Your Shops</h3>
<p>
OpenERP's Point of Sale introduces a super clean
interface with no installation required that runs
online and offline on modern hardwares.
</p><p>
It's full integration with the company inventory
and accounting, gives you real time statistics and
consolidations amongst all shops without the hassle
of integrating several applications.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="text-image">
<div class="container">
<div class="row">
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Enterprise Social Network</h3>
<p>
Make every employee feel more connected and engaged
with twitter-like features for your own company. Follow
people, share best practices, 'like' top ideas, etc.
</p><p>
Connect with experts, follow what interests you, share
documents and promote best practices with OpenERP
Social application. Get work done with effective
collaboration across departments, geographies
and business applications.
</p>
</div>
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/text_image.png">
</div>
</div>
</div>
</section><section class="" data-snippet-id="portfolio">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>Our Porfolio</h2>
<h4 class="text-muted">More than 500 successful projects</h4>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
</div>
</div>
</div>
</section>
</div>
"""
OERP_WEBSITE_HTML_2_IN = [
'management helps your team get work done',
]
OERP_WEBSITE_HTML_2_OUT = [
'Make every employee feel more connected',
'img class="img-responsive shadow" src="/website/static/src/img/text_image.png',
]
# Plain-text email: body followed by a '--' signature delimiter.
# NOTE(review): *_IN presumably lists the kept body, *_OUT the stripped
# signature — confirm in the consuming test.
TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = ["""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = ["""--
MySignature"""]
# Plain-text reply containing '>'-quoted history and a quoted signature.
TEXT_2 = """Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> bert.tartopoils@miam.miam
>>
>
>
> --
> RaoulSignature
Bert TARTOPOILS
bert.tartopoils@miam.miam
"""
TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)",
              """> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> bert.tartopoils@miam.miam
>>
>
>
> --
> RaoulSignature"""]
# Minimal HTML email: one <p> with a body and a '--' signature.
HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature</p>"""
HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
HTML_1_OUT = ["""--
MySignature"""]
HTML_2 = """<div>
<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>
</div>
<div>
<ul>
<li><span>9 AM: brainstorming about our new amazing business app</span></li>
<li><span>9.45 AM: summary</span></li>
<li><span>10 AM: meeting with Fabien to present our app</span></li>
</ul>
</div>
<div>
<font><span>Is everything ok for you ?</span></font>
</div>"""
HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>",
"<li><span>9 AM: brainstorming about our new amazing business app</span></li>",
"<li><span>9.45 AM: summary</span></li>",
"<li><span>10 AM: meeting with Fabien to present our app</span></li>",
"<font><span>Is everything ok for you ?</span></font>"]
HTML_2_OUT = []
# <pre>-based reply: answer above a '----- Mail original -----' separator,
# quoted original below it.
HTML_3 = """<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>
<pre>Hi,
My CRM-related question.
Regards,
XXXX</pre></div>"""
HTML_3_IN = ["""<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>"""]
HTML_3_OUT = ["Hi,", "My CRM-related question.",
              "Regards,"]
HTML_4 = """
<div>
<div>Hi Nicholas,</div>
<br>
<div>I'm free now. 00447710085916.</div>
<br>
<div>Regards,</div>
<div>Nicholas</div>
<br>
<span id="OLK_SRC_BODY_SECTION">
<div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt">
<span style="font-weight:bold">From: </span>OpenERP Enterprise <<a href="mailto:sales@openerp.com">sales@openerp.com</a>><br><span style="font-weight:bold">Reply-To: </span><<a href="mailto:sales@openerp.com">sales@openerp.com</a>><br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User <<a href="mailto:n.saxlund@babydino.com">n.saxlund@babydino.com</a>><br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br>
</div>
<br>
<div>
<p>Hello Nicholas Saxlund, </p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?
</p>
<p>Best regards, </p>
<pre><a href="http://openerp.com">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</span>
</div>"""
# Yahoo-style reply entirely inside a <pre>: answer above an underscore
# rule, quoted original below it.
HTML_5 = """<div><pre>Hi,
I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP.
I created a database and started to install module by log in as administrator.
However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement.
Could you please let me know how could I fix this problem?
Regards,
Goh Sin Yih
________________________________
From: OpenERP Enterprise <sales@openerp.com>
To: sinyih_goh@yahoo.com
Sent: Friday, February 8, 2013 12:46 AM
Subject: Feedback From Your OpenERP Trial
Hello Goh Sin Yih,
Thank you for having tested OpenERP Online.
I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it.
So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP?
Thanks in advance for providing your feedback,
Do not hesitate to contact me if you have any questions,
Thanks,
</pre>"""
# Gmail-style reply: answer on top, original quoted in a <blockquote>.
# *_IN presumably expected kept, *_OUT stripped — confirm in consuming test.
GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:dummy@example.com">dummy@example.com</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>--<br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.']
GMAIL_1_OUT = ['Administrator', 'Log in our portal at:']
THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM,
<a href="mailto:dummy@example.com">dummy@example.com</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon']
HOTMAIL_1 = """<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br> <br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: xxx@xxx.xxx<br>To: xxx@xxx.xxx<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
# Expected fragments for the HOTMAIL_1 sample. NOTE: the leading space in the
# second *_OUT entry and the doubled "your your" mirror the sample text
# exactly and are intentional.
HOTMAIL_1_IN = [
    "I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.",
]
HOTMAIL_1_OUT = [
    "Subject: Re: your OpenERP.com registration",
    " I noticed you recently created an OpenERP.com account to access OpenERP Apps.",
    "We would like to know more about your your business needs and requirements",
    "Belgium: +32.81.81.37.00",
]
MSOFFICE_1 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:sales@openerp.com]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
# Expected fragments for the MSOFFICE_1 sample: the reply body is kept,
# the forwarded "Re: your OpenERP.com registration" part is stripped.
MSOFFICE_1_IN = [
    'Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.',
]
MSOFFICE_1_OUT = [
    'I noticed you recently downloaded OpenERP.',
    'Uou mentioned you wish to use OpenERP in your own company.',
    'Belgium: +32.81.81.37.00',
]
MSOFFICE_2 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Nicolas,</span></p><p></p>
<p></p>
<p class="MsoNormal" style="text-indent:.5in">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thank You</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Matt</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Raoul Petitpoil</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Poil Industries</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Information Technology</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">920 Super Street</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Tel: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Fax: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Email: </span>
<a href="mailto:raoul@petitpoil.com">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:blue">raoul@petitpoil.com</span>
</a>
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.poilindustries.com</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.superproducts.com</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span>
</b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul Petitpoil, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
# Expected fragments for the MSOFFICE_2 sample.
MSOFFICE_2_IN = [
    'We are currently investigating the possibility',
]
MSOFFICE_2_OUT = [
    'I noticed you recently downloaded OpenERP.',
    'You indicated that you wish',
    'Belgium: +32.81.81.37.00',
]
MSOFFICE_3 = """<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi Nicolas !</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Bien à vous, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Met vriendelijke groeten, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Best regards,</span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">
</span></b></p><p><b> </b></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">R. Petitpoil <br></span>
</b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v. <i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:gray"><br></span>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">xxx.xxx </span>
</b>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:gray"><br></span>
</b>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Wingdings 2";color:#1F497D">7</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:"Wingdings 2";color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">+32 2 727.05.91<br></span>
</b>
<span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:green"> <b> </b></span>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:green">Please consider the environment before printing this email.</span>
</b>
<span lang="EN-GB" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:navy"> </span>
<span lang="EN-GB" style="font-family:"Calibri","sans-serif";color:navy">
</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal">
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span>
</b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Envoyé :</b> jeudi 18 avril 2013 11:31<br><b>À :</b> Paul Richard<br><b>Objet :</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul PETITPOIL, </p>
<p></p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
# Expected fragments for the MSOFFICE_3 sample.
MSOFFICE_3_IN = ['I saw your boss yesterday']
# Bug fix: the first entry previously read 'I noticed you recently downloaded
# OpenERP.' (copy-pasted from MSOFFICE_1/2), a phrase that never occurs in the
# MSOFFICE_3 sample — which says "registered to our OpenERP Online solution"
# — so the removal check was vacuously true. Use the phrase that is actually
# in the quoted part.
MSOFFICE_3_OUT = [
    'I noticed you recently registered to our OpenERP Online solution.',
    'You indicated that you wish',
    'Belgium: +32.81.81.37.00',
]
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# bug: read more not apparent, strange message in read more span
BUG1 = """<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
# Expected fragments for BUG1 ("read more not apparent"): the message body is
# kept, the signature block (name/address/phone/site) is folded away.
BUG_1_IN = ['Hi Migration Team', 'Paragraph 1']
BUG_1_OUT = [
    'Olivier Laurent',
    'Chaussée de Namur',
    '81.81.37.00',
    'openerp.com',
]
BUG2 = """
<div>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Original Message --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Subject:
</th>
<td>Fwd: TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date: </th>
<td>Wed, 16 Oct 2013 14:11:13 +0200</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">From: </th>
<td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com"><che@openerp.com></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">To: </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:online@openerp.com">online@openerp.com</a></td>
</tr>
</tbody>
</table>
<br>
<br>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Message original --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Sujet:
</th>
<td>TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date :
</th>
<td>Wed, 16 Oct 2013 10:34:45 -0000</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">De : </th>
<td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:infos@inzoservices.com"><infos@inzoservices.com></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Répondre
à : </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:catchall@mail.odoo.com">catchall@mail.odoo.com</a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Pour :
</th>
<td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com"><che@openerp.com></a></td>
</tr>
</tbody>
</table>
<br>
<br>
<div>
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonjour,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<div>
<div style="border:none;border-top:solid #B5C4DF
1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:infos@inzoservices.com">mailto:infos@inzoservices.com</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 20:03<br>
<b>À :</b> 'Followers of
INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br>
<b>Objet :</b> RE: OpenERP S.A. Payment Reminder</span></p>
</div>
</div>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonsoir,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Je
me permets de revenir vers vous par écrit , car j’ai
fait 2 appels vers votre service en exposant mon
problème, mais je n’ai pas eu de retour.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cela
fait un mois que j’ai fait la souscription de votre
produit, mais je me rends compte qu’il est pas adapté à
ma situation ( fonctionnalité manquante et surtout je
n’ai pas beaucoup de temps à passer à résoudre des
bugs). </span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">C’est
pourquoi , j’ai demandé qu’un accord soit trouvé avec
vous pour annuler le contrat (tout en vous payant le
mois d’utilisation de septembre).</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Ida
Siwatala</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
<a href="mailto:che@openerp.com">che@openerp.com</a>
[<a href="mailto:che@openerp.com">mailto:che@openerp.com</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 17:41<br>
<b>À :</b> <a href="mailto:infos@inzoservices.com">infos@inzoservices.com</a><br>
<b>Objet :</b> OpenERP S.A. Payment Reminder</span></p>
<p> </p>
<div>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Dear
INZO services,</span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Exception
made if there was a mistake of ours, it seems that the
following amount stays unpaid. Please, take
appropriate measures in order to carry out this
payment in the next 8 days. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"></span></p>
<p> </p>
<table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0">
<tbody>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Date de facturation</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Description</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Reference</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Due Date</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Amount (€)</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Lit.</p>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013/1121</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>Enterprise - Inzo Services
- Juillet 2013</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>420.0</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Amount
due : 420.00 € </span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Would
your payment have been carried out after this mail was
sent, please ignore this message. Do not hesitate to
contact our accounting department. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"><br>
Best Regards, <br>
Aurore Lesage <br>
OpenERP<br>
Chaussée de Namur, 40 <br>
B-1367 Grand Rosières <br>
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br>
E-mail : <a href="mailto:ale@openerp.com">ale@openerp.com</a> <br>
Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p>
</div>
</div>
</div>
--<br>
INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP
S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small>
<small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&login=che&message_id=5750830">in
OpenERP</a></small> <br>
<pre class="moz-signature" cols="72">--
Christine Herrmann
OpenERP
Chaussée de Namur, 40
B-1367 Grand Rosières
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01
Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre>
<br>
</div>
<br>
<br>
</div>
<br>
</div>"""
# Expected fragments for BUG2 (forwarded payment reminder).
BUG_2_IN = [
    'read more',
    '...',
]
# Bug fix: the two entries below were missing the separating comma, so
# Python's implicit string-literal concatenation collapsed them into the
# single nonsensical entry 'Fwd: TR: OpenERP S.Afait un mois', which can
# never match anything in the sample.
BUG_2_OUT = [
    'Fwd: TR: OpenERP S.A',
    'fait un mois',
]
# BUG 20/08/2014: READ MORE NOT APPEARING
BUG3 = """<div class="oe_msg_body_long" style="/* display: none; */"><p>OpenERP has been upgraded to version 8.0.</p>
<h2>What's new in this upgrade?</h2>
<div class="document">
<ul>
<li><p class="first">New Warehouse Management System:</p>
<blockquote>
<p>Schedule your picking, packing, receptions and internal moves automatically with Odoo using
your own routing rules. Define push and pull rules to organize a warehouse or to manage
product moves between several warehouses. Track in detail all stock moves, not only in your
warehouse but wherever else it's taken as well (customers, suppliers or manufacturing
locations).</p>
</blockquote>
</li>
<li><p class="first">New Product Configurator</p>
</li>
<li><p class="first">Documentation generation from website forum:</p>
<blockquote>
<p>New module to generate a documentation from questions and responses from your forum.
The documentation manager can define a table of content and any user, depending their karma,
can link a question to an entry of this TOC.</p>
</blockquote>
</li>
<li><p class="first">New kanban view of documents (resumes and letters in recruitement, project documents...)</p>
</li>
<li><p class="first">E-Commerce:</p>
<blockquote>
<ul class="simple">
<li>Manage TIN in contact form for B2B.</li>
<li>Dedicated salesteam to easily manage leads and orders.</li>
</ul>
</blockquote>
</li>
<li><p class="first">Better Instant Messaging.</p>
</li>
<li><p class="first">Faster and Improved Search view: Search drawer now appears on top of the results, and is open
by default in reporting views</p>
</li>
<li><p class="first">Improved User Interface:</p>
<blockquote>
<ul class="simple">
<li>Popups has changed to be more responsive on tablets and smartphones.</li>
<li>New Stat Buttons: Forms views have now dynamic buttons showing some statistics abouts linked models.</li>
<li>Color code to check in one look availability of components in an MRP order.</li>
<li>Unified menu bar allows you to switch easily between the frontend (website) and backend</li>
<li>Results panel is now scrollable independently of the menu bars, keeping the navigation,
search bar and view switcher always within reach.</li>
</ul>
</blockquote>
</li>
<li><p class="first">User signature is now in HTML.</p>
</li>
<li><p class="first">New development API.</p>
</li>
<li><p class="first">Remove support for Outlook and Thunderbird plugins</p>
</li>
</ul>
</div>
<p>Enjoy the new OpenERP Online!</p><span class="oe_mail_reduce"><a href="#">read less</a></span></div>"""
# Expected fragments for BUG3 ("read more not appearing", 2014-08-20).
BUG_3_IN = ['read more', '...']
BUG_3_OUT = ['New kanban view of documents']
| agpl-3.0 |
leighpauls/k2cro4 | v8/test/benchmarks/testcfg.py | 11 | 3348 | # Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
from os.path import join, split
def GetSuite(name, root):
  """Benchmark suite discovery is not supported; always returns None."""
  return None
def IsNumber(string):
  """Return True if *string* can be parsed as a float, False otherwise."""
  try:
    float(string)
  except ValueError:
    return False
  return True
class BenchmarkTestCase(test.TestCase):
  """Test case that runs the V8 benchmark suite (run.js) and checks its
  score output."""

  def __init__(self, path, context, mode):
    super(BenchmarkTestCase, self).__init__(context, split(path), mode)
    self.root = path

  def GetLabel(self):
    return '%s benchmark %s' % (self.mode, self.GetName())

  def IsFailureOutput(self, output):
    # A non-zero exit code is always a failure.
    if output.exit_code != 0:
      return True
    # Every "name: value" line of the benchmark output must carry a
    # numeric score; anything else means the run went wrong.
    for line in output.stdout.splitlines():
      head, sep, tail = line.partition(':')
      if sep and not IsNumber(tail.strip()):
        return True
    return False

  def GetCommand(self):
    # VM invocation followed by the benchmark driver script.
    command = self.context.GetVmCommand(self, self.mode)
    command.append(join(self.root, 'run.js'))
    return command

  def GetName(self):
    return 'V8'

  def BeforeRun(self):
    # run.js loads its companions via relative paths, so run from its dir.
    os.chdir(self.root)

  def AfterRun(self, result):
    os.chdir(self.context.buildspace)

  def GetSource(self):
    return open(join(self.root, 'run.js')).read()

  def GetCustomFlags(self, mode):
    return []
class BenchmarkTestConfiguration(test.TestConfiguration):
  """Configuration exposing the benchmark suite as a single test."""

  def __init__(self, context, root):
    super(BenchmarkTestConfiguration, self).__init__(context, root)

  def ListTests(self, current_path, path, mode, variant_flags):
    # The suite always lives under <workspace>/benchmarks, regardless of
    # the requested path.
    benchmark_root = join(self.context.workspace, 'benchmarks')
    return [BenchmarkTestCase(benchmark_root, self.context, mode)]

  def GetBuildRequirements(self):
    return ['d8']

  def GetTestStatus(self, sections, defs):
    pass
def GetConfiguration(context, root):
  # Entry point used by the test harness to obtain this suite's configuration.
  return BenchmarkTestConfiguration(context, root)
| bsd-3-clause |
bikong2/django | django/contrib/gis/gdal/base.py | 654 | 1179 | from ctypes import c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class GDALBase(object):
    """
    Base object for GDAL objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # The pointer starts out NULL until a GDAL object is attached.
    _ptr = None

    # Default allowed pointer type.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Guard against use after the underlying GDAL object was released:
        # passing a NULL pointer into GDAL routines would be very bad.
        if not self._ptr:
            raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # Accept a raw integer (wrapped into ptr_type), None (NULL), or an
        # already-typed pointer of the compatible type; reject anything else.
        if isinstance(ptr, six.integer_types):
            self._ptr = self.ptr_type(ptr)
        elif ptr is None or isinstance(ptr, self.ptr_type):
            self._ptr = ptr
        else:
            raise TypeError('Incompatible pointer type')

    # Pointer access property.
    ptr = property(_get_ptr, _set_ptr)
| bsd-3-clause |
paweljasinski/ironpython3 | Tests/compat/sbs_parse_string.py | 3 | 4400 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from common import *
# String-literal prefixes to exercise (raw/unicode combinations).
prefixs = ['', 'r', 'u', 'ur', 'R', 'U', 'UR', 'Ur', 'uR']

def generate_str1():
    """Build the source text of a list literal covering \\u, \\t and \\x escapes.

    Each entry repeats the backslash 0-4 times before the escape so both the
    escaped and literal-backslash spellings are generated for every prefix.
    """
    s = "[ \n"
    for prefix in prefixs:
        for x in range(5):
            s += " %s\"a%su0020B\", \n" % (prefix, "\\" * x)
    for prefix in prefixs:
        for x in range(5):
            s += " %s\"A%stb\", \n" % (prefix, "\\" * x)
    for prefix in prefixs:
        for x in range(5):
            s += " %s\"a%sx34b\", \n" % (prefix, "\\" * x)
    for prefix in prefixs:
        for x in range(5):
            for y in range(5):
                # BUG FIX: this used `s + ...`, discarding the result, so the
                # two-escape (\u....\t) combinations were never emitted.
                s += " %s\"A%su0020%stB\", \n" % (prefix, "\\" * x, "\\" * y)
    s += "] \n"
    return s
class test_parse_string(object):
    # Feeds generated/templated string literals through eval() and prints the
    # results so the compat harness can diff IronPython against CPython.

    # always valid \u \t sequence
    def test_scenario1(self):
        # eval() the generated list literal, then dump each element's
        # length and value for the transcript diff.
        list_string = eval(generate_str1())
        for x in list_string:
            print len(x)
            print x

    # could be invalid \ sequence
    def test_scenario2(self):
        # Every template gets each prefix and 0-9 backslashes spliced in;
        # literals that fail to parse are reported as "exception".
        for prefix in prefixs :
            for template in [
                "%s\"%s\"", # "\\\\\"
                "%s\'%sa\'", # '\\\\a'
                "%s\'a%sb\'", # 'a\\\b'
                "%s\'\u%s\'",
                "%s\'\u0%s\'",
                "%s\'\u00%s\'",
                "%s\'\u002%s\'",
                "%s\'\u0020%s\'",
                "%s\'\\u%s\'",
                "%s\'\\u0%s\'",
                "%s\'\\u00%s\'",
                "%s\'\\u002%s\'",
                "%s\'\\u0020%s\'",
                "%s\'%s\u\'",
                "%s\'%s\u0\'",
                "%s\'%s\u00\'",
                "%s\'%s\u002\'",
                "%s\'%s\u0020\'",
                "%s\'\\u002%s\'",
                ] :
                for x in range(10):
                    line = template % (prefix, "\\" * x)
                    try:
                        printwith("case", line)
                        # NOTE(review): shadows builtin str; harmless here
                        # since the name is only printed.
                        str = eval(line)
                        print len(str)
                        print str
                    except:
                        print "exception"
######################################################################################
def apply_format(s, l, onlypos = False):
    """Print format string *s* applied to each value in *l* and its neighbours.

    Every y in *l* is tried as y-1, y and y+1; unless *onlypos*, the negated
    value is also formatted (some conversions only accept non-negatives).
    """
    for base in l:
        for value in (base - 1, base, base + 1):
            printwith("case", s, value)
            printwith("same", s % value)
            if onlypos:
                continue
            printwith("case", s, -1 * value)
            printwith("same", s % (-1 * value))
class test_formating(object):
    # Exercises %-formatting flag/width/precision combinations. Output order
    # is the contract: the harness diffs this transcript against CPython, so
    # cases must be emitted in exactly this sequence.

    def test_formating1(self):
        # Signed decimal with widths 1-3, then unsigned/hex/octal (positive
        # operands only), then fixed-point precisions.
        for ss in ['+0', '-0', '+ 0', '- 0', '0', ' 0', '+', '-', '+ ', '- ', ' +', ' -','#', "#0", "+#0"]:
            for i in (1, 2, 3):
                for k in ('d', ):
                    s = "%" + ss + str(i) + k
                    apply_format(s, [0, 10, 100, 1000, 10000, 1234567890123456])
                for k in ('u', 'x', 'o', 'X'):
                    s = "%" + ss + str(i) + k
                    apply_format(s, [0, 10, 100, 1000, 10000, 1234567890123456], True)
            for i in ('8.2', '8.3', '7.2', '3.2', '.2'):
                s = "%" + ss + i + "f"
                apply_format(s, [0, 10, 100, 1000, 10000, 0.01, 0.1, 1.01, 100.3204])

    # reasonal difference?
    def test_formating2(self):
        # Degenerate float precisions ('8.' etc.) kept in a separate case.
        for ss in ['+0', '-0', '+ 0', '- 0', '0', ' 0', '+', '-', '+ ', '- ', ' +', ' -','#', "#0", "+#0"]:
            for i in ('8.', '8.0', '8.1'):
                s = "%" + ss + i + "f"
                apply_format(s, [0, 10, 100, 1000, 10000, 0.01, 0.1, 1.01, 100.3204])
# Run both suites through the shared compat harness (defined in common).
runtests(test_parse_string)
runtests(test_formating)
| apache-2.0 |
alsrgv/tensorflow | tensorflow/python/ops/batch_norm_benchmark.py | 25 | 10767 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end benchmark for batch normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def batch_norm_op(tensor, mean, variance, beta, gamma, scale):
  """Fused kernel for batch normalization."""
  # _batch_norm_with_global_normalization is deprecated in v9
  # Pin the graph's producer version *before* creating the op so the
  # deprecated kernel is still accepted.
  test_util.set_producer_version(ops.get_default_graph(), 8)
  # pylint: disable=protected-access
  # Epsilon 0.001 matches the py/slow variants so results are comparable.
  return gen_nn_ops._batch_norm_with_global_normalization(
      tensor, mean, variance, beta, gamma, 0.001, scale)
  # pylint: enable=protected-access


# Note that the naive implementation is much slower:
# batch_norm = (tensor - mean) * tf.math.rsqrt(variance + 0.001)
# if scale:
#   batch_norm *= gamma
# return batch_norm + beta
def batch_norm_py(tensor, mean, variance, beta, gamma, scale):
  """Python implementation of batch normalization."""
  # Pass gamma only when scaling is requested; epsilon matches the fused op.
  scale_param = gamma if scale else None
  return nn_impl.batch_normalization(tensor, mean, variance, beta, scale_param,
                                     0.001)
def batch_norm_slow(tensor, mean, variance, beta, gamma, scale):
  """Reference implementation built from elementary ops (slowest variant)."""
  normalized = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
  if scale:
    normalized *= gamma
  return normalized + beta
def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
  """Build a graph containing a sequence of batch normalizations.

  Args:
    device: string, the device to run on.
    input_shape: shape of the input tensor.
    axes: axes that are to be normalized across.
    num_layers: number of batch normalization layers in the graph.
    mode: "op", "py" or "slow" depending on the implementation.
    scale: scale after normalization.
    train: if true, also run backprop.

  Returns:
    An array of tensors to run()
  """
  moment_shape = []
  keep_dims = mode == "py" or mode == "slow"
  if keep_dims:
    # Broadcasting variants keep the reduced axes as size-1 dimensions.
    for axis in range(len(input_shape)):
      if axis in axes:
        moment_shape.append(1)
      else:
        moment_shape.append(input_shape[axis])
  else:
    # The fused op expects the reduced axes to be dropped entirely.
    for axis in range(len(input_shape)):
      if axis not in axes:
        moment_shape.append(input_shape[axis])
  with ops.device("/%s:0" % device):
    tensor = variables.Variable(random_ops.truncated_normal(input_shape))
    for _ in range(num_layers):
      if train:
        mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)
      else:
        # Inference path: use fixed statistics instead of computing moments.
        mean = array_ops.zeros(moment_shape)
        variance = array_ops.ones(moment_shape)
      beta = variables.Variable(array_ops.zeros(moment_shape))
      gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))
      if mode == "py":
        tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)
      elif mode == "op":
        tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)
      elif mode == "slow":
        tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)
    if train:
      # Backprop benchmark: return the gradients instead of the activations.
      return gradients_impl.gradients([tensor], variables.trainable_variables())
    else:
      return [tensor]
def print_difference(mode, t1, t2):
  """Print the difference in timing between two runs."""
  relative = (t2 - t1) / t1 * 100.0
  print("=== %s: %.1f%% ===" % (mode, relative))
class BatchNormBenchmark(test.Benchmark):
  """Benchmark batch normalization."""

  def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,
                 train, num_iters):
    """Run the graph and print its execution time.

    Args:
      device: string, the device to run on.
      input_shape: shape of the input tensor.
      axes: axes that are to be normalized across.
      num_layers: number of batch normalization layers in the graph.
      mode: "op", "py" or "slow" depending on the implementation.
      scale: scale after normalization.
      train: if true, also run backprop.
      num_iters: number of steps to run.

    Returns:
      The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
      outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,
                            train)
    with session_lib.Session(graph=graph) as session:
      variables.global_variables_initializer().run()
      _ = session.run([out.op for out in outputs])  # warm up.
      start_time = time.time()
      for _ in range(num_iters):
        _ = session.run([out.op for out in outputs])
      duration = time.time() - start_time
      print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
            (device, len(input_shape), len(axes), num_layers, mode, scale, train,
             duration / num_iters))
    name_template = (
        "batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_"
        "layers_{num_layers}_scale_{scale}_"
        "train_{train}")
    self.report_benchmark(
        name=name_template.format(
            device=device,
            mode=mode,
            num_layers=num_layers,
            scale=scale,
            train=train,
            shape=str(input_shape).replace(" ", ""),
            axes=str(axes)).replace(" ", ""),
        iters=num_iters,
        wall_time=duration / num_iters)
    return duration

  def benchmark_batch_norm(self):
    # Each section compares the fused op, the python composite and the
    # naive implementation; CPU runs use 5 iterations, GPU runs 50.
    print("Forward convolution (lower layers).")
    shape = [8, 128, 128, 32]
    axes = [0, 1, 2]
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward/backward convolution (lower layers).")
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward convolution (higher layers).")
    shape = [256, 17, 17, 32]
    axes = [0, 1, 2]
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward/backward convolution (higher layers).")
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    # Fully-connected shapes: the fused op does not apply, only py vs slow.
    print("Forward fully-connected.")
    shape = [1024, 32]
    axes = [0]
    t1 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
    print_difference("py vs slow", t1, t2)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
      print_difference("py vs slow", t1, t2)
    print("Forward/backward fully-connected.")
    t1 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 50)
    t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 50)
    print_difference("py vs slow", t1, t2)
    if FLAGS.use_gpu:
      # NOTE(review): this GPU section uses 5 iterations while the others
      # use 50 -- possibly intentional, worth confirming.
      t1 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 5)
      t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 5)
      print_difference("py vs slow", t1, t2)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # argparse has no native bool type; map the string "true" (any case) to True.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--use_gpu",
      type="bool",
      nargs="?",
      const=True,
      default=True,
      help="Run GPU benchmarks."
  )
  global FLAGS  # pylint:disable=global-at-module-level
  # Unrecognized flags are forwarded to the benchmark runner untouched.
  FLAGS, unparsed = parser.parse_known_args()
  test.main(argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
suiyuan2009/tensorflow | tensorflow/examples/how_tos/reading_data/fully_connected_reader.py | 59 | 7434 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and Eval the MNIST network.
This version is like fully_connected_feed.py but uses data converted
to a TFRecords file containing tf.train.Example protocol buffers.
See:
https://www.tensorflow.org/programmers_guide/reading_data#reading_from_files
for context.
YOU MUST run convert_to_records before running this (but you only need to
run it once).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
# Populated by argparse in the __main__ block below.
FLAGS = None

# Constants used for dealing with the files, matches convert_to_records.
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
def read_and_decode(filename_queue):
  """Read one serialized MNIST Example from *filename_queue* and decode it.

  Returns:
    (image, label): the image as a flat float32 vector in [-0.5, 0.5] and
    the label as an int32 scalar.
  """
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
      })

  # Convert from a scalar string tensor (whose single string has
  # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
  # [mnist.IMAGE_PIXELS].
  image = tf.decode_raw(features['image_raw'], tf.uint8)
  image.set_shape([mnist.IMAGE_PIXELS])

  # OPTIONAL: Could reshape into a 28x28 image and apply distortions
  # here.  Since we are not applying any distortions in this
  # example, and the next step expects the image to be flattened
  # into a vector, we don't bother.

  # Convert from [0, 255] -> [-0.5, 0.5] floats.
  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

  # Convert label from a scalar uint8 tensor to an int32 scalar.
  label = tf.cast(features['label'], tf.int32)

  return image, label
def inputs(train, batch_size, num_epochs):
  """Reads input data num_epochs times.

  Args:
    train: Selects between the training (True) and validation (False) data.
    batch_size: Number of examples per returned batch.
    num_epochs: Number of times to read the input data, or 0/None to
       train forever.

  Returns:
    A tuple (images, labels), where:
    * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
      in the range [-0.5, 0.5].
    * labels is an int32 tensor with shape [batch_size] with the true label,
      a number in the range [0, mnist.NUM_CLASSES).
    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
  """
  # Normalize 0 to None so the producer loops forever.
  if not num_epochs: num_epochs = None
  filename = os.path.join(FLAGS.train_dir,
                          TRAIN_FILE if train else VALIDATION_FILE)

  with tf.name_scope('input'):
    filename_queue = tf.train.string_input_producer(
        [filename], num_epochs=num_epochs)

    # Even when reading in multiple threads, share the filename
    # queue.
    image, label = read_and_decode(filename_queue)

    # Shuffle the examples and collect them into batch_size batches.
    # (Internally uses a RandomShuffleQueue.)
    # We run this in two threads to avoid being a bottleneck.
    images, sparse_labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=2,
        capacity=1000 + 3 * batch_size,
        # Ensures a minimum amount of shuffling of examples.
        min_after_dequeue=1000)

    return images, sparse_labels
def run_training():
  """Train MNIST for a number of steps."""
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Input images and labels.
    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs)

    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images,
                             FLAGS.hidden1,
                             FLAGS.hidden2)

    # Add to the Graph the loss calculation.
    loss = mnist.loss(logits, labels)

    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # The op for initializing the variables.
    # Local variables are needed for the epoch counter in the input producer.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    # Create a session for running operations in the Graph.
    sess = tf.Session()

    # Initialize the variables (the trained variables and the
    # epoch counter).
    sess.run(init_op)

    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
      step = 0
      # Training ends when the input producer exhausts num_epochs and the
      # queue runners signal OutOfRangeError below.
      while not coord.should_stop():
        start_time = time.time()

        # Run one step of the model.  The return values are
        # the activations from the `train_op` (which is
        # discarded) and the `loss` op.  To inspect the values
        # of your ops or variables, you may include them in
        # the list passed to sess.run() and the value tensors
        # will be returned in the tuple from the call.
        _, loss_value = sess.run([train_op, loss])

        duration = time.time() - start_time

        # Print an overview fairly often.
        if step % 100 == 0:
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                     duration))
        step += 1
    except tf.errors.OutOfRangeError:
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()

    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def main(_):
  # Flags have already been parsed into FLAGS; the argv argument is unused.
  run_training()
if __name__ == '__main__':
  # Parse the known flags into FLAGS; anything unrecognized is handed on
  # to tf.app.run so TensorFlow's own flag handling still works.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='Initial learning rate.'
  )
  parser.add_argument(
      '--num_epochs',
      type=int,
      default=2,
      help='Number of epochs to run trainer.'
  )
  parser.add_argument(
      '--hidden1',
      type=int,
      default=128,
      help='Number of units in hidden layer 1.'
  )
  parser.add_argument(
      '--hidden2',
      type=int,
      default=32,
      help='Number of units in hidden layer 2.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size.'
  )
  parser.add_argument(
      '--train_dir',
      type=str,
      default='/tmp/data',
      help='Directory with the training data.'
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
psyhofreak/ft-engine | mysql-test/suite/tokudb/t/change_column_char.py | 54 | 1339 | #!/usr/bin/env python
import sys
def gen_test(n):
    """Emit mysqltest commands exercising ALTER of a CHAR(n) column to CHAR(i).

    Shrinking (i < n) is expected to fail with ER_UNSUPPORTED_EXTENSION;
    growing (i >= n) is applied to both the TokuDB table and a MyISAM shadow
    copy, which are then diffed to verify the data survived the expansion.
    """
    print "CREATE TABLE t (a CHAR(%d));" % (n)
    for v in [ 'hi', 'there', 'people' ]:
        print "INSERT INTO t VALUES ('%s');" % (v)
    for i in range(2,256):
        if i < n:
            # Shrinking: the next ALTER on t must raise the expected error.
            print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
            print "--error ER_UNSUPPORTED_EXTENSION"
        else:
            # Growing: build a MyISAM shadow table with the same data.
            print "CREATE TABLE ti LIKE t;"
            print "ALTER TABLE ti ENGINE=myisam;"
            print "INSERT INTO ti SELECT * FROM t;"
            print "ALTER TABLE ti CHANGE COLUMN a a CHAR(%d);" % (i)
        print "ALTER TABLE t CHANGE COLUMN a a CHAR(%d);" % (i)
        if i >= n:
            print "let $diff_tables=test.t, test.ti;"
            print "source include/diff_tables.inc;"
            print "DROP TABLE ti;"
    print "DROP TABLE t;"
def main():
    """Print the complete .test file to stdout and return the exit status."""
    print "# this test is generated by change_char.py"
    print "# test char expansion"
    print "--disable_warnings"
    print "DROP TABLE IF EXISTS t,ti;"
    print "--enable_warnings"
    # Force TokuDB and its fast-alter path so unsupported alters error out.
    print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";"
    print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
    # all n takes too long to run, so here is a subset of tests
    for n in [ 1, 2, 3, 4, 5, 6, 7, 8, 16, 31, 32, 63, 64, 127, 128, 254, 255 ]:
        gen_test(n)
    return 0
# Generate the test to stdout, propagating main()'s status as the exit code.
sys.exit(main())
| gpl-2.0 |
takeflight/django | django/core/serializers/__init__.py | 121 | 8167 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
    "xml": "django.core.serializers.xml_serializer",
    "python": "django.core.serializers.python",
    "json": "django.core.serializers.json",
    "yaml": "django.core.serializers.pyyaml",
}

# Lazily populated registry mapping format name -> serializer module
# (see _load_serializers below).
_serializers = {}
class BadSerializer(object):
    """
    Stub serializer to hold exception raised during registration

    This allows the serializer registration to cache serializers and if there
    is an error raised in the process of creating a serializer it will be
    raised and passed along to the caller when the serializer is used.
    """
    internal_use_only = False

    def __init__(self, exception):
        # The ImportError (or similar) captured at registration time.
        self.exception = exception

    def __call__(self, *args, **kwargs):
        # Re-raise lazily, at the point the serializer is actually used.
        raise self.exception
def register_serializer(format, serializer_module, serializers=None):
    """Register a new serializer.

    ``serializer_module`` should be the fully qualified module name
    for the serializer.

    If ``serializers`` is provided, the registration will be added
    to the provided dictionary.

    If ``serializers`` is not provided, the registration will be made
    directly into the global register of serializers. Adding serializers
    directly is not a thread-safe operation.
    """
    if serializers is None and not _serializers:
        _load_serializers()
    try:
        module = importlib.import_module(serializer_module)
    except ImportError as exc:
        # Defer the failure: the stub re-raises when the serializer is used.
        stub = BadSerializer(exc)
        module = type('BadSerializerModule', (object,), {
            'Deserializer': stub,
            'Serializer': stub,
        })
    target = serializers if serializers is not None else _serializers
    target[format] = module
def unregister_serializer(format):
    "Unregister a given serializer. This is not a thread-safe operation."
    # Load the defaults first so unregistering a built-in works too.
    if not _serializers:
        _load_serializers()
    if format not in _serializers:
        raise SerializerDoesNotExist(format)
    del _serializers[format]
def get_serializer(format):
    # Return the Serializer class registered under *format*.
    if not _serializers:
        _load_serializers()
    if format not in _serializers:
        raise SerializerDoesNotExist(format)
    return _serializers[format].Serializer
def get_serializer_formats():
    # All registered format names, built-in and settings-defined.
    if not _serializers:
        _load_serializers()
    return list(_serializers)
def get_public_serializer_formats():
    # Like get_serializer_formats(), but omits internal-only serializers.
    if not _serializers:
        _load_serializers()
    return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
    # Return the Deserializer callable registered under *format*.
    if not _serializers:
        _load_serializers()
    if format not in _serializers:
        raise SerializerDoesNotExist(format)
    return _serializers[format].Deserializer
def serialize(format, queryset, **options):
    """
    Serialize a queryset (or any iterator that returns database objects) using
    a certain serializer.
    """
    serializer = get_serializer(format)()
    serializer.serialize(queryset, **options)
    return serializer.getvalue()
def deserialize(format, stream_or_string, **options):
    """
    Deserialize a stream or a string. Returns an iterator that yields ``(obj,
    m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
    object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
    list_of_related_objects}``.
    """
    return get_deserializer(format)(stream_or_string, **options)
def _load_serializers():
    """
    Register built-in and settings-defined serializers. This is done lazily so
    that user code has a chance to (e.g.) set up custom settings without
    needing to be careful of import order.
    """
    global _serializers
    registry = {}
    for fmt in BUILTIN_SERIALIZERS:
        register_serializer(fmt, BUILTIN_SERIALIZERS[fmt], registry)
    if hasattr(settings, "SERIALIZATION_MODULES"):
        # Settings-defined modules may override built-ins of the same name.
        for fmt in settings.SERIALIZATION_MODULES:
            register_serializer(fmt, settings.SERIALIZATION_MODULES[fmt], registry)
    _serializers = registry
def sort_dependencies(app_list):
    """Sort a list of (app_config, models) pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
    """
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app_config, model_list in app_list:
        if model_list is None:
            model_list = app_config.get_models()

        for model in model_list:
            models.add(model)

            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [apps.get_model(dep) for dep in deps]
            else:
                deps = []

            # Now add a dependency for any FK relation with a model that
            # defines a natural key
            for field in model._meta.fields:
                if hasattr(field.rel, 'to'):
                    rel_model = field.rel.to
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            # Also add a dependency for any simple M2M relation with a model
            # that defines a natural key.  M2M relations with explicit through
            # models don't count as dependencies.
            for field in model._meta.many_to_many:
                if field.rel.through._meta.auto_created:
                    rel_model = field.rel.to
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            model_dependencies.append((model, deps))

    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()

            # If all of the models in the dependency list are either already
            # on the final model list, or not on the original serialization
            # list, then this model's dependencies are satisfied.  (all()
            # short-circuits, unlike the previous manual found-flag loop.)
            if all(d not in models or d in model_list for d in deps):
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
                ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
                for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
            )
        model_dependencies = skipped
    return model_list
| bsd-3-clause |
PhiInnovations/mdp28-linux-bsp | meta-openembedded/meta-oe/recipes-devtools/python/python-pyyaml/setup.py | 69 | 1936 | NAME = 'PyYAML'
VERSION = '3.06'
DESCRIPTION = "YAML parser and emitter for Python"
LONG_DESCRIPTION = """\
YAML is a data serialization format designed for human readability and
interaction with scripting languages. PyYAML is a YAML parser and
emitter for Python.
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages. PyYAML
supports standard YAML tags and provides Python-specific tags that allow
to represent an arbitrary Python object.
PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistance."""
AUTHOR = "Kirill Simonov"
AUTHOR_EMAIL = 'xi@resolvent.net'
LICENSE = "MIT"
PLATFORMS = "Any"
URL = "http://pyyaml.org/wiki/PyYAML"
DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
]
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import sys, os.path
if __name__ == '__main__':
    setup(
        name=NAME,
        version=VERSION,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        license=LICENSE,
        platforms=PLATFORMS,
        url=URL,
        download_url=DOWNLOAD_URL,
        classifiers=CLASSIFIERS,
        # Pure-python package lives under lib/yaml.
        package_dir={'': 'lib'},
        packages=['yaml'],
        # Cython-built binding against the system libyaml.
        ext_modules = [
            Extension( "_yaml", ["ext/_yaml.pyx"], libraries = ["yaml"] )
        ],
        cmdclass={
            'build_ext': build_ext,
        },
    )
Grogdor/CouchPotatoServer | couchpotato/core/settings.py | 42 | 8457 | from __future__ import with_statement
import ConfigParser
from hashlib import md5
from CodernityDB.hash_index import HashIndex
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat
class Settings(object):

    # Option metadata per section (presumably filled by addOptions -- not
    # visible here) and the per-option type registry (filled by setType via
    # registerDefaults).
    options = {}
    types = {}
def __init__(self):
    """Register the settings API endpoints and the database setup hook.

    No configuration is read here; setFile() must be called to attach and
    parse settings.conf.
    """
    addApiView('settings', self.view, docs = {
        'desc': 'Return the options and its values of settings.conf. Including the default values and group ordering used on the settings page.',
        'return': {'type': 'object', 'example': """{
// objects like in __init__.py of plugin
"options": {
"moovee" : {
"groups" : [{
"description" : "SD movies only",
"name" : "#alt.binaries.moovee",
"options" : [{
"default" : false,
"name" : "enabled",
"type" : "enabler"
}],
"tab" : "providers"
}],
"name" : "moovee"
}
},
// object structured like settings.conf
"values": {
"moovee": {
"enabled": false
}
}
}"""}
    })
    addApiView('settings.save', self.saveView, docs = {
        'desc': 'Save setting to config file (settings.conf)',
        'params': {
            'section': {'desc': 'The section name in settings.conf'},
            'name': {'desc': 'The option name'},
            'value': {'desc': 'The value you want to save'},
        }
    })

    addEvent('database.setup', self.databaseSetup)

    # Populated later by setFile().
    self.file = None
    self.p = None
    self.log = None
def setFile(self, config_file):
    """Attach *config_file* as the backing store and parse it."""
    self.file = config_file

    self.p = ConfigParser.RawConfigParser()
    self.p.read(config_file)

    # NOTE(review): imported locally, presumably to avoid a circular import
    # at module load time -- confirm.
    from couchpotato.core.logger import CPLog
    self.log = CPLog(__name__)

    self.connectEvents()
def databaseSetup(self):
    # Ensure the 'property' hash index exists once the database is ready.
    fireEvent('database.setup_index', 'property', PropertyIndex)
def parser(self):
    # Expose the raw RawConfigParser for callers needing direct access.
    return self.p
def sections(self):
    # All section names currently present in settings.conf.
    return self.p.sections()
def connectEvents(self):
addEvent('settings.options', self.addOptions)
addEvent('settings.register', self.registerDefaults)
addEvent('settings.save', self.save)
def registerDefaults(self, section_name, options = None, save = True):
if not options: options = {}
self.addSection(section_name)
for option_name, option in options.items():
self.setDefault(section_name, option_name, option.get('default', ''))
# Migrate old settings from old location to the new location
if option.get('migrate_from'):
if self.p.has_option(option.get('migrate_from'), option_name):
previous_value = self.p.get(option.get('migrate_from'), option_name)
self.p.set(section_name, option_name, previous_value)
self.p.remove_option(option.get('migrate_from'), option_name)
if option.get('type'):
self.setType(section_name, option_name, option.get('type'))
if save:
self.save()
def set(self, section, option, value):
return self.p.set(section, option, value)
def get(self, option = '', section = 'core', default = None, type = None):
try:
try: type = self.types[section][option]
except: type = 'unicode' if not type else type
if hasattr(self, 'get%s' % type.capitalize()):
return getattr(self, 'get%s' % type.capitalize())(section, option)
else:
return self.getUnicode(section, option)
except:
return default
def delete(self, option = '', section = 'core'):
self.p.remove_option(section, option)
self.save()
def getEnabler(self, section, option):
return self.getBool(section, option)
def getBool(self, section, option):
try:
return self.p.getboolean(section, option)
except:
return self.p.get(section, option) == 1
def getInt(self, section, option):
try:
return self.p.getint(section, option)
except:
return tryInt(self.p.get(section, option))
def getFloat(self, section, option):
try:
return self.p.getfloat(section, option)
except:
return tryFloat(self.p.get(section, option))
def getUnicode(self, section, option):
value = self.p.get(section, option).decode('unicode_escape')
return toUnicode(value).strip()
def getValues(self):
values = {}
for section in self.sections():
values[section] = {}
for option in self.p.items(section):
(option_name, option_value) = option
is_password = False
try: is_password = self.types[section][option_name] == 'password'
except: pass
values[section][option_name] = self.get(option_name, section)
if is_password and values[section][option_name]:
values[section][option_name] = len(values[section][option_name]) * '*'
return values
def save(self):
with open(self.file, 'wb') as configfile:
self.p.write(configfile)
self.log.debug('Saved settings')
def addSection(self, section):
if not self.p.has_section(section):
self.p.add_section(section)
def setDefault(self, section, option, value):
if not self.p.has_option(section, option):
self.p.set(section, option, value)
def setType(self, section, option, type):
if not self.types.get(section):
self.types[section] = {}
self.types[section][option] = type
def addOptions(self, section_name, options):
if not self.options.get(section_name):
self.options[section_name] = options
else:
self.options[section_name] = mergeDicts(self.options[section_name], options)
def getOptions(self):
return self.options
def view(self, **kwargs):
return {
'options': self.getOptions(),
'values': self.getValues()
}
def saveView(self, **kwargs):
section = kwargs.get('section')
option = kwargs.get('name')
value = kwargs.get('value')
# See if a value handler is attached, use that as value
new_value = fireEvent('setting.save.%s.%s' % (section, option), value, single = True)
self.set(section, option, (new_value if new_value else value).encode('unicode_escape'))
self.save()
# After save (for re-interval etc)
fireEvent('setting.save.%s.%s.after' % (section, option), single = True)
fireEvent('setting.save.%s.*.after' % section, single = True)
return {
'success': True,
}
def getProperty(self, identifier):
from couchpotato import get_db
db = get_db()
prop = None
try:
propert = db.get('property', identifier, with_doc = True)
prop = propert['doc']['value']
except:
pass # self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0)))
return prop
def setProperty(self, identifier, value = ''):
from couchpotato import get_db
db = get_db()
try:
p = db.get('property', identifier, with_doc = True)
p['doc'].update({
'identifier': identifier,
'value': toUnicode(value),
})
db.update(p['doc'])
except:
db.insert({
'_t': 'property',
'identifier': identifier,
'value': toUnicode(value),
})
class PropertyIndex(HashIndex):
    """Hash index over 'property' documents, keyed by md5(identifier)."""

    _version = 1

    def __init__(self, *args, **kwargs):
        # Keys are 32-character hex digests.
        kwargs['key_format'] = '32s'
        super(PropertyIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return md5(key).hexdigest()

    def make_key_value(self, data):
        # Only 'property' documents are indexed; everything else is skipped.
        if data.get('_t') != 'property':
            return None
        return md5(data['identifier']).hexdigest(), None
| gpl-3.0 |
markuskont/salt-tick | vagrant/lib/win-x64/M2Crypto/RSA.py | 8 | 13128 | """M2Crypto wrapper for OpenSSL RSA API.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
import sys
import util, BIO, Err, m2
class RSAError(Exception): pass
m2.rsa_init(RSAError)
no_padding = m2.no_padding
pkcs1_padding = m2.pkcs1_padding
sslv23_padding = m2.sslv23_padding
pkcs1_oaep_padding = m2.pkcs1_oaep_padding
class RSA:
    """
    RSA Key Pair.
    """

    m2_rsa_free = m2.rsa_free

    def __init__(self, rsa, _pyfree=0):
        """
        @param rsa: low-level m2 RSA structure.
        @param _pyfree: non-zero if this instance owns (and must free) ``rsa``.
        """
        assert m2.rsa_type_check(rsa), "'rsa' type error"
        self.rsa = rsa
        self._pyfree = _pyfree

    def __del__(self):
        # getattr() guard: __del__ can run even if __init__ failed early.
        if getattr(self, '_pyfree', 0):
            self.m2_rsa_free(self.rsa)

    def __len__(self):
        """Key length in bits (rsa_size() reports bytes)."""
        return m2.rsa_size(self.rsa) << 3

    def __getattr__(self, name):
        # Expose the public exponent 'e' and modulus 'n' as attributes.
        if name == 'e':
            return m2.rsa_get_e(self.rsa)
        elif name == 'n':
            return m2.rsa_get_n(self.rsa)
        else:
            raise AttributeError

    def pub(self):
        """Return the public key as an (e, n) tuple."""
        assert self.check_key(), 'key is not initialised'
        return m2.rsa_get_e(self.rsa), m2.rsa_get_n(self.rsa)

    def public_encrypt(self, data, padding):
        """Encrypt ``data`` with the public key using ``padding`` mode."""
        assert self.check_key(), 'key is not initialised'
        return m2.rsa_public_encrypt(self.rsa, data, padding)

    def public_decrypt(self, data, padding):
        """Decrypt ``data`` with the public key using ``padding`` mode."""
        assert self.check_key(), 'key is not initialised'
        return m2.rsa_public_decrypt(self.rsa, data, padding)

    def private_encrypt(self, data, padding):
        """Encrypt ``data`` with the private key using ``padding`` mode."""
        assert self.check_key(), 'key is not initialised'
        return m2.rsa_private_encrypt(self.rsa, data, padding)

    def private_decrypt(self, data, padding):
        """Decrypt ``data`` with the private key using ``padding`` mode."""
        assert self.check_key(), 'key is not initialised'
        return m2.rsa_private_decrypt(self.rsa, data, padding)

    def save_key_bio(self, bio, cipher='aes_128_cbc', callback=util.passphrase_callback):
        """
        Save the key pair to an M2Crypto.BIO.BIO object in PEM format.

        @type bio: M2Crypto.BIO.BIO
        @param bio: M2Crypto.BIO.BIO object to save key to.

        @type cipher: string
        @param cipher: Symmetric cipher to protect the key. The default
        cipher is 'aes_128_cbc'. If cipher is None, then the key is saved
        in the clear.

        @type callback: Python callable
        @param callback: A Python callable object that is invoked
        to acquire a passphrase with which to protect the key.
        The default is util.passphrase_callback.
        """
        if cipher is None:
            return m2.rsa_write_key_no_cipher(self.rsa, bio._ptr(), callback)
        else:
            ciph = getattr(m2, cipher, None)
            if ciph is None:
                raise RSAError('not such cipher %s' % cipher)
            else:
                ciph = ciph()
            return m2.rsa_write_key(self.rsa, bio._ptr(), ciph, callback)

    def save_key(self, file, cipher='aes_128_cbc', callback=util.passphrase_callback):
        """
        Save the key pair to a file in PEM format.

        @type file: string
        @param file: Name of file to save key to.

        @type cipher: string
        @param cipher: Symmetric cipher to protect the key. The default
        cipher is 'aes_128_cbc'. If cipher is None, then the key is saved
        in the clear.

        @type callback: Python callable
        @param callback: A Python callable object that is invoked
        to acquire a passphrase with which to protect the key.
        The default is util.passphrase_callback.
        """
        bio = BIO.openfile(file, 'wb')
        return self.save_key_bio(bio, cipher, callback)

    save_pem = save_key

    def as_pem(self, cipher='aes_128_cbc', callback=util.passphrase_callback):
        """
        Returns the key(pair) as a string in PEM format.
        """
        bio = BIO.MemoryBuffer()
        self.save_key_bio(bio, cipher, callback)
        return bio.read()

    def save_key_der_bio(self, bio):
        """
        Save the key pair to an M2Crypto.BIO.BIO object in DER format.

        @type bio: M2Crypto.BIO.BIO
        @param bio: M2Crypto.BIO.BIO object to save key to.
        """
        return m2.rsa_write_key_der(self.rsa, bio._ptr())

    def save_key_der(self, file):
        """
        Save the key pair to a file in DER format.

        @type file: str
        @param file: Filename to save key to
        """
        bio = BIO.openfile(file, 'wb')
        return self.save_key_der_bio(bio)

    def save_pub_key_bio(self, bio):
        """
        Save the public key to an M2Crypto.BIO.BIO object in PEM format.

        @type bio: M2Crypto.BIO.BIO
        @param bio: M2Crypto.BIO.BIO object to save key to.
        """
        return m2.rsa_write_pub_key(self.rsa, bio._ptr())

    def save_pub_key(self, file):
        """
        Save the public key to a file in PEM format.

        @type file: string
        @param file: Name of file to save key to.
        """
        bio = BIO.openfile(file, 'wb')
        # Consistency: route through save_pub_key_bio() like save_key() does.
        return self.save_pub_key_bio(bio)

    def check_key(self):
        """Return non-zero if the key is sane (per RSA_check_key)."""
        return m2.rsa_check_key(self.rsa)

    def sign_rsassa_pss(self, digest, algo='sha1', salt_length=20):
        """
        Signs a digest with the private key using RSASSA-PSS

        @requires: OpenSSL 0.9.7h or later.

        @type digest: str
        @param digest: A digest created by using the digest method

        @type salt_length: int
        @param salt_length: The length of the salt to use

        @type algo: str
        @param algo: The hash algorithm to use

        @return: a string which is the signature
        """
        hash_func = getattr(m2, algo, None)  # renamed: don't shadow builtin hash()
        if hash_func is None:
            # Bug fix: previously referenced the undefined name 'hash_algo',
            # raising NameError instead of the intended ValueError.
            raise ValueError('not such hash algorithm %s' % algo)

        signature = m2.rsa_padding_add_pkcs1_pss(self.rsa, digest, hash_func(), salt_length)

        return self.private_encrypt(signature, m2.no_padding)

    def verify_rsassa_pss(self, data, signature, algo='sha1', salt_length=20):
        """
        Verifies the signature RSASSA-PSS

        @requires: OpenSSL 0.9.7h or later.

        @type data: str
        @param data: Data that has been signed

        @type signature: str
        @param signature: The signature signed with RSASSA-PSS

        @type salt_length: int
        @param salt_length: The length of the salt that was used

        @type algo: str
        @param algo: The hash algorithm to use

        @return: 1 or 0, depending on whether the signature was
        verified or not.
        """
        hash_func = getattr(m2, algo, None)
        if hash_func is None:
            # Bug fix: previously referenced the undefined name 'hash_algo'.
            raise ValueError('not such hash algorithm %s' % algo)

        plain_signature = self.public_decrypt(signature, m2.no_padding)

        return m2.rsa_verify_pkcs1_pss(self.rsa, data, plain_signature, hash_func(), salt_length)

    def sign(self, digest, algo='sha1'):
        """
        Signs a digest with the private key

        @type digest: str
        @param digest: A digest created by using the digest method

        @type algo: str
        @param algo: The method that created the digest.
        Legal values are 'sha1','sha224', 'sha256', 'ripemd160',
        and 'md5'.

        @return: a string which is the signature
        """
        digest_type = getattr(m2, 'NID_' + algo, None)
        if digest_type is None:
            raise ValueError('unknown algorithm', algo)

        return m2.rsa_sign(self.rsa, digest, digest_type)

    def verify(self, data, signature, algo='sha1'):
        """
        Verifies the signature with the public key

        @type data: str
        @param data: Data that has been signed

        @type signature: str
        @param signature: The signature signed with the private key

        @type algo: str
        @param algo: The method use to create digest from the data
        before it was signed. Legal values are 'sha1','sha224',
        'sha256', 'ripemd160', and 'md5'.

        @return: True or False, depending on whether the signature was
        verified.
        """
        digest_type = getattr(m2, 'NID_' + algo, None)
        if digest_type is None:
            raise ValueError('unknown algorithm', algo)

        return m2.rsa_verify(self.rsa, data, signature, digest_type)
class RSA_pub(RSA):
    """
    Object interface to an RSA public key.
    """

    def __setattr__(self, name, value):
        # 'e' and 'n' live in the underlying m2 structure; they may only be
        # supplied through the new_pub_key() factory function.
        if name in ('e', 'n'):
            raise RSAError('use factory function new_pub_key() to set (e, n)')
        self.__dict__[name] = value

    def private_encrypt(self, *argv):
        raise RSAError('RSA_pub object has no private key')

    def private_decrypt(self, *argv):
        raise RSAError('RSA_pub object has no private key')

    def save_key(self, file, *args, **kw):
        """
        Save public key to file.
        """
        return self.save_pub_key(file)

    def save_key_bio(self, bio, *args, **kw):
        """
        Save public key to BIO.
        """
        return self.save_pub_key_bio(bio)

    #save_key_der

    #save_key_der_bio

    def check_key(self):
        return m2.rsa_check_pub_key(self.rsa)
def rsa_error():
    """Raise RSAError carrying the current OpenSSL error-queue reason."""
    raise RSAError(m2.err_reason_error_string(m2.err_get_error()))
def keygen_callback(p, n, out=sys.stdout):
    """
    Default callback for gen_key(): write one progress character per
    key-generation phase ('.', '+', '*', newline) and flush immediately.
    """
    out.write(('.', '+', '*', '\n')[p])
    out.flush()
def gen_key(bits, e, callback=keygen_callback):
    """
    Generate an RSA key pair.

    @type bits: int
    @param bits: Key length, in bits.

    @type e: int
    @param e: The RSA public exponent.

    @type callback: Python callable
    @param callback: Invoked during key generation, normally to give
    visual feedback; defaults to keygen_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object owning the generated key.
    """
    rsa_ptr = m2.rsa_generate_key(bits, e, callback)
    return RSA(rsa_ptr, 1)
def load_key(file, callback=util.passphrase_callback):
    """
    Load an RSA key pair from file.

    @type file: string
    @param file: Name of file containing the key pair in PEM format.

    @type callback: Python callable
    @param callback: Invoked to acquire a passphrase with which to
    unlock the key; defaults to util.passphrase_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    return load_key_bio(BIO.openfile(file), callback)
def load_key_bio(bio, callback=util.passphrase_callback):
    """
    Load an RSA key pair from an M2Crypto.BIO.BIO object.

    @type bio: M2Crypto.BIO.BIO
    @param bio: BIO object containing the key pair in PEM format.

    @type callback: Python callable
    @param callback: Invoked to acquire a passphrase with which to
    unlock the key; defaults to util.passphrase_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    rsa_ptr = m2.rsa_read_key(bio._ptr(), callback)
    if rsa_ptr is None:
        rsa_error()
    return RSA(rsa_ptr, 1)
def load_key_string(string, callback=util.passphrase_callback):
    """
    Load an RSA key pair from a string.

    @type string: string
    @param string: String containing the key pair in PEM format.

    @type callback: Python callable
    @param callback: Invoked to acquire a passphrase with which to
    unlock the key; defaults to util.passphrase_callback.

    @rtype: M2Crypto.RSA.RSA
    @return: M2Crypto.RSA.RSA object.
    """
    return load_key_bio(BIO.MemoryBuffer(string), callback)
def load_pub_key(file):
    """
    Load an RSA public key from file.

    @type file: string
    @param file: Name of file containing the public key in PEM format.

    @rtype: M2Crypto.RSA.RSA_pub
    @return: M2Crypto.RSA.RSA_pub object.
    """
    return load_pub_key_bio(BIO.openfile(file))
def load_pub_key_bio(bio):
    """
    Load an RSA public key from an M2Crypto.BIO.BIO object.

    @type bio: M2Crypto.BIO.BIO
    @param bio: BIO object containing the public key in PEM format.

    @rtype: M2Crypto.RSA.RSA_pub
    @return: M2Crypto.RSA.RSA_pub object.
    """
    rsa_ptr = m2.rsa_read_pub_key(bio._ptr())
    if rsa_ptr is None:
        rsa_error()
    return RSA_pub(rsa_ptr, 1)
def new_pub_key(e_n):
    """
    Instantiate an RSA_pub object from an (e, n) tuple.

    @type e_n: tuple
    @param e_n: Pair (e, n) where ``e`` is the RSA public exponent and
    ``n`` the RSA composite of primes; both are strings in OpenSSL's
    MPINT format - 4-byte big-endian bit-count followed by the
    appropriate number of bits.

    @rtype: M2Crypto.RSA.RSA_pub
    @return: M2Crypto.RSA.RSA_pub object.
    """
    # Callers pass a single (e, n) tuple; unpack it explicitly rather than
    # in the parameter list (the old tuple-parameter form is Python-2-only).
    e, n = e_n
    rsa = m2.rsa_new()
    m2.rsa_set_e(rsa, e)
    m2.rsa_set_n(rsa, n)
    return RSA_pub(rsa, 1)
| gpl-3.0 |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/sns/__init__.py | 761 | 1104 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| apache-2.0 |
KokareIITP/django | django/core/serializers/json.py | 320 | 3782 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import, unicode_literals
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False  # exposed publicly as the "json" serializer

    def _init_options(self):
        # Build json.dump() keyword arguments from the serializer options.
        # NOTE(review): compares version components as *strings*, which is
        # only correct while every component is a single digit -- confirm.
        if json.__version__.split('.') >= ['2', '1', '3']:
            # Use JS strings to represent Python Decimal instances (ticket #16850)
            self.options.update({'use_decimal': False})
        self._current = None
        self.json_kwargs = self.options.copy()
        # 'stream' and 'fields' are serializer options, not json.dump() ones.
        self.json_kwargs.pop('stream', None)
        self.json_kwargs.pop('fields', None)
        if self.options.get('indent'):
            # Prevent trailing spaces
            self.json_kwargs['separators'] = (',', ': ')

    def start_serialization(self):
        self._init_options()
        self.stream.write("[")

    def end_serialization(self):
        if self.options.get("indent"):
            self.stream.write("\n")
        self.stream.write("]")
        if self.options.get("indent"):
            self.stream.write("\n")

    def end_object(self, obj):
        # self._current has the field data
        indent = self.options.get("indent")
        if not self.first:
            # Comma-separate objects; when indenting, the newline spaces them.
            self.stream.write(",")
            if not indent:
                self.stream.write(" ")
        if indent:
            self.stream.write("\n")
        json.dump(self.get_dump_object(obj), self.stream,
                  cls=DjangoJSONEncoder, **self.json_kwargs)
        self._current = None

    def getvalue(self):
        # Grand-parent super
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    data = stream_or_string
    if not isinstance(data, (bytes, six.string_types)):
        data = data.read()
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    try:
        for obj in PythonDeserializer(json.loads(data), **options):
            yield obj
    except GeneratorExit:
        raise
    except Exception as e:
        # Wrap anything else in DeserializationError, keeping the traceback.
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
    """
    def default(self, o):
        # See "Date Time String Format" in the ECMA-262 specification.
        # datetime must be tested before date: datetime is a date subclass.
        if isinstance(o, datetime.datetime):
            value = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to milliseconds.
                value = value[:23] + value[26:]
            if value.endswith('+00:00'):
                value = value[:-6] + 'Z'
            return value
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            value = o.isoformat()
            # Truncate microseconds to milliseconds.
            return value[:12] if o.microsecond else value
        if isinstance(o, (decimal.Decimal, uuid.UUID)):
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)

# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| bsd-3-clause |
singhj/gae-hello-world | mapreduce/key_ranges.py | 49 | 4157 | #!/usr/bin/env python
"""An abstract for a collection of key_range.KeyRange objects."""
from google.appengine.ext import key_range
from mapreduce import namespace_range
__all__ = [
"KeyRangesFactory",
"KeyRanges"]
# pylint: disable=g-bad-name
class KeyRangesFactory(object):
    """Factory for KeyRanges."""

    @classmethod
    def create_from_list(cls, list_of_key_ranges):
        """Create a KeyRanges object.

        Args:
          list_of_key_ranges: a list of key_range.KeyRange objects.

        Returns:
          A _KeyRanges object.
        """
        return _KeyRangesFromList(list_of_key_ranges)

    @classmethod
    def create_from_ns_range(cls, ns_range):
        """Create a KeyRanges object.

        Args:
          ns_range: a namespace_range.NamespaceRange object.

        Returns:
          A _KeyRanges object.
        """
        return _KeyRangesFromNSRange(ns_range)

    @classmethod
    def from_json(cls, json):
        """Deserialize from json.

        Args:
          json: a dict of json compatible fields.

        Returns:
          a KeyRanges object.

        Raises:
          ValueError: if the json is invalid.
        """
        if json["name"] in _KEYRANGES_CLASSES:
            return _KEYRANGES_CLASSES[json["name"]].from_json(json)
        # Bug fix: previously ``ValueError("Invalid json %s", json)`` passed
        # two arguments instead of formatting the message string.
        raise ValueError("Invalid json %s" % (json,))
class KeyRanges(object):
"""An abstraction for a collection of key_range.KeyRange objects."""
def __iter__(self):
return self
def next(self):
"""Iterator iteraface."""
raise NotImplementedError()
def to_json(self):
return {"name": self.__class__.__name__}
@classmethod
def from_json(cls):
raise NotImplementedError()
def __eq__(self):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
class _KeyRangesFromList(KeyRanges):
    """KeyRanges backed by an explicit list of key_range.KeyRange objects."""

    def __init__(self, list_of_key_ranges):
        self._key_ranges = list_of_key_ranges

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self._key_ranges == other._key_ranges)

    def next(self):
        # Ranges are consumed from the tail of the list.
        if not self._key_ranges:
            raise StopIteration()
        return self._key_ranges.pop()

    def __str__(self):
        remaining = len(self._key_ranges)
        if remaining == 1:
            return "Single KeyRange %s" % (self._key_ranges[0],)
        if remaining:
            return "From %s to %s" % (self._key_ranges[0], self._key_ranges[-1])
        return "Empty KeyRange."

    def to_json(self):
        json = super(_KeyRangesFromList, self).to_json()
        json["list_of_key_ranges"] = [kr.to_json() for kr in self._key_ranges]
        return json

    @classmethod
    def from_json(cls, json):
        ranges = [key_range.KeyRange.from_json(kr)
                  for kr in json["list_of_key_ranges"]]
        return cls(ranges)
class _KeyRangesFromNSRange(KeyRanges):
    """Create KeyRanges from a namespace range.

    Yields one KeyRange per namespace in the underlying NamespaceRange.
    ``_ns_range`` is set to None once the last namespace has been handed
    out, which makes subsequent next() calls raise StopIteration.
    """

    def __init__(self, ns_range):
        """Init.

        Args:
          ns_range: a namespace_range.NamespaceRange object, or None for
            an already-exhausted range (see from_json).
        """
        self._ns_range = ns_range
        if self._ns_range is not None:
            self._iter = iter(self._ns_range)
            # Last namespace yielded; drives the resume point in to_json().
            self._last_ns = None

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self._ns_range == other._ns_range

    def __str__(self):
        return str(self._ns_range)

    def next(self):
        # None marks exhaustion (set below, or deserialized that way).
        if self._ns_range is None:
            raise StopIteration()
        self._last_ns = self._iter.next()
        current_ns_range = self._ns_range
        if self._last_ns == self._ns_range.namespace_end:
            # That was the final namespace; next call raises StopIteration.
            self._ns_range = None
        # One KeyRange covering the entire namespace just yielded.
        return key_range.KeyRange(namespace=self._last_ns,
                                  _app=current_ns_range.app)

    def to_json(self):
        json = super(_KeyRangesFromNSRange, self).to_json()
        ns_range = self._ns_range
        if self._ns_range is not None and self._last_ns is not None:
            # Serialize the resume point: skip namespaces already yielded.
            ns_range = ns_range.with_start_after(self._last_ns)
        if ns_range is not None:
            json.update({"ns_range": ns_range.to_json_object()})
        return json

    @classmethod
    def from_json(cls, json):
        if "ns_range" in json:
            return cls(
                namespace_range.NamespaceRange.from_json_object(json["ns_range"]))
        else:
            # Serialized after exhaustion: nothing left to iterate.
            return cls(None)
# Registry used by KeyRangesFactory.from_json to resolve the concrete
# KeyRanges subclass from the serialized "name" field.
_KEYRANGES_CLASSES = {
    _KeyRangesFromList.__name__: _KeyRangesFromList,
    _KeyRangesFromNSRange.__name__: _KeyRangesFromNSRange
}
| apache-2.0 |
dynius/p2pool | p2pool/work.py | 42 | 23951 | from __future__ import division
import base64
import random
import re
import sys
import time
from twisted.internet import defer
from twisted.python import log
import bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data
from bitcoin import helper, script, worker_interface
from util import forest, jsonrpc, variable, deferral, math, pack
import p2pool, p2pool.data as p2pool_data
class WorkerBridge(worker_interface.WorkerBridge):
COINBASE_NONCE_LENGTH = 8
def __init__(self, node, my_pubkey_hash, donation_percentage, merged_urls, worker_fee):
worker_interface.WorkerBridge.__init__(self)
self.recent_shares_ts_work = []
self.node = node
self.my_pubkey_hash = my_pubkey_hash
self.donation_percentage = donation_percentage
self.worker_fee = worker_fee
self.net = self.node.net.PARENT
self.running = True
self.pseudoshare_received = variable.Event()
self.share_received = variable.Event()
self.local_rate_monitor = math.RateMonitor(10*60)
self.local_addr_rate_monitor = math.RateMonitor(10*60)
self.removed_unstales_var = variable.Variable((0, 0, 0))
self.removed_doa_unstales_var = variable.Variable(0)
self.my_share_hashes = set()
self.my_doa_share_hashes = set()
self.tracker_view = forest.TrackerView(self.node.tracker, forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
my_count=lambda share: 1 if share.hash in self.my_share_hashes else 0,
my_doa_count=lambda share: 1 if share.hash in self.my_doa_share_hashes else 0,
my_orphan_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'orphan' else 0,
my_dead_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'doa' else 0,
)))
@self.node.tracker.verified.removed.watch
def _(share):
if share.hash in self.my_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance
self.removed_unstales_var.set((
self.removed_unstales_var.value[0] + 1,
self.removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0),
self.removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0),
))
if share.hash in self.my_doa_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
self.removed_doa_unstales_var.set(self.removed_doa_unstales_var.value + 1)
# MERGED WORK
self.merged_work = variable.Variable({})
@defer.inlineCallbacks
def set_merged_work(merged_url, merged_userpass):
merged_proxy = jsonrpc.HTTPProxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass)))
while self.running:
auxblock = yield deferral.retry('Error while calling merged getauxblock on %s:' % (merged_url,), 30)(merged_proxy.rpc_getauxblock)()
self.merged_work.set(math.merge_dicts(self.merged_work.value, {auxblock['chainid']: dict(
hash=int(auxblock['hash'], 16),
target='p2pool' if auxblock['target'] == 'p2pool' else pack.IntType(256).unpack(auxblock['target'].decode('hex')),
merged_proxy=merged_proxy,
)}))
yield deferral.sleep(1)
for merged_url, merged_userpass in merged_urls:
set_merged_work(merged_url, merged_userpass)
@self.merged_work.changed.watch
def _(new_merged_work):
print 'Got new merged mining work!'
# COMBINE WORK
self.current_work = variable.Variable(None)
def compute_work():
t = self.node.bitcoind_work.value
bb = self.node.best_block_header.value
if bb is not None and bb['previous_block'] == t['previous_block'] and self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(bb)) <= t['bits'].target:
print 'Skipping from block %x to block %x!' % (bb['previous_block'],
bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)))
t = dict(
version=bb['version'],
previous_block=bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)),
bits=bb['bits'], # not always true
coinbaseflags='',
height=t['height'] + 1,
time=bb['timestamp'] + 600, # better way?
transactions=[],
transaction_fees=[],
merkle_link=bitcoin_data.calculate_merkle_link([None], 0),
subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.node.bitcoind_work.value['height']),
last_update=self.node.bitcoind_work.value['last_update'],
)
self.current_work.set(t)
self.node.bitcoind_work.changed.watch(lambda _: compute_work())
self.node.best_block_header.changed.watch(lambda _: compute_work())
compute_work()
self.new_work_event = variable.Event()
@self.current_work.transitioned.watch
def _(before, after):
# trigger LP if version/previous_block/bits changed or transactions changed from nothing
if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (not before['transactions'] and after['transactions']):
self.new_work_event.happened()
self.merged_work.changed.watch(lambda _: self.new_work_event.happened())
self.node.best_share_var.changed.watch(lambda _: self.new_work_event.happened())
def stop(self):
self.running = False
def get_stale_counts(self):
'''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)'''
my_shares = len(self.my_share_hashes)
my_doa_shares = len(self.my_doa_share_hashes)
delta = self.tracker_view.get_delta_to_last(self.node.best_share_var.value)
my_shares_in_chain = delta.my_count + self.removed_unstales_var.value[0]
my_doa_shares_in_chain = delta.my_doa_count + self.removed_doa_unstales_var.value
orphans_recorded_in_chain = delta.my_orphan_announce_count + self.removed_unstales_var.value[1]
doas_recorded_in_chain = delta.my_dead_announce_count + self.removed_unstales_var.value[2]
my_shares_not_in_chain = my_shares - my_shares_in_chain
my_doa_shares_not_in_chain = my_doa_shares - my_doa_shares_in_chain
return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
def get_user_details(self, username):
contents = re.split('([+/])', username)
assert len(contents) % 2 == 1
user, contents2 = contents[0], contents[1:]
desired_pseudoshare_target = None
desired_share_target = None
for symbol, parameter in zip(contents2[::2], contents2[1::2]):
if symbol == '+':
try:
desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(parameter))
except:
if p2pool.DEBUG:
log.err()
elif symbol == '/':
try:
desired_share_target = bitcoin_data.difficulty_to_target(float(parameter))
except:
if p2pool.DEBUG:
log.err()
if random.uniform(0, 100) < self.worker_fee:
pubkey_hash = self.my_pubkey_hash
else:
try:
pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT)
except: # XXX blah
pubkey_hash = self.my_pubkey_hash
return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
def preprocess_request(self, user):
if (self.node.p2p_node is None or len(self.node.p2p_node.peers) == 0) and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is not connected to any peers')
if time.time() > self.current_work.value['last_update'] + 60:
raise jsonrpc.Error_for_code(-12345)(u'lost contact with bitcoind')
user, pubkey_hash, desired_share_target, desired_pseudoshare_target = self.get_user_details(user)
return pubkey_hash, desired_share_target, desired_pseudoshare_target
def _estimate_local_hash_rate(self):
if len(self.recent_shares_ts_work) == 50:
hash_rate = sum(work for ts, work in self.recent_shares_ts_work[1:])//(self.recent_shares_ts_work[-1][0] - self.recent_shares_ts_work[0][0])
if hash_rate:
return hash_rate
return None
def get_local_rates(self):
miner_hash_rates = {}
miner_dead_hash_rates = {}
datums, dt = self.local_rate_monitor.get_datums_in_last()
for datum in datums:
miner_hash_rates[datum['user']] = miner_hash_rates.get(datum['user'], 0) + datum['work']/dt
if datum['dead']:
miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt
return miner_hash_rates, miner_dead_hash_rates
def get_local_addr_rates(self):
addr_hash_rates = {}
datums, dt = self.local_addr_rate_monitor.get_datums_in_last()
for datum in datums:
addr_hash_rates[datum['pubkey_hash']] = addr_hash_rates.get(datum['pubkey_hash'], 0) + datum['work']/dt
return addr_hash_rates
    def get_work(self, pubkey_hash, desired_share_target, desired_pseudoshare_target):
        """Build a work unit for a miner and return (ba, got_response).

        ``ba`` is a dict describing the block template handed to the miner;
        ``got_response(header, user, coinbase_nonce)`` verifies a returned
        proof-of-work, submits any found block (and merged-mining blocks)
        and records shares/pseudoshares.
        NOTE(review): Python 2 only code (xrange, iteritems, print
        statements, tuple-unpacking lambda at stale_info).
        """
        if self.node.best_share_var.value is None and self.node.net.PERSIST:
            raise jsonrpc.Error_for_code(-12345)(u'p2pool is downloading shares')
        # Merged mining: commit to every merged chain's hash in the coinbase
        # and remember (aux_work, index, hashes) to build the aux proofs later.
        if self.merged_work.value:
            tree, size = bitcoin_data.make_auxpow_tree(self.merged_work.value)
            mm_hashes = [self.merged_work.value.get(tree.get(i), dict(hash=0))['hash'] for i in xrange(size)]
            mm_data = '\xfa\xbemm' + bitcoin_data.aux_pow_coinbase_type.pack(dict(
                merkle_root=bitcoin_data.merkle_hash(mm_hashes),
                size=size,
                nonce=0,
            ))
            mm_later = [(aux_work, mm_hashes.index(aux_work['hash']), mm_hashes) for chain_id, aux_work in self.merged_work.value.iteritems()]
        else:
            mm_data = ''
            mm_later = []
        tx_hashes = [bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx)) for tx in self.current_work.value['transactions']]
        tx_map = dict(zip(tx_hashes, self.current_work.value['transactions']))
        # Choose the share class: keep the previous share's type until the
        # chain has voted (>=95% of recent shares) to upgrade to SUCCESSOR.
        previous_share = self.node.tracker.items[self.node.best_share_var.value] if self.node.best_share_var.value is not None else None
        if previous_share is None:
            share_type = p2pool_data.Share
        else:
            previous_share_type = type(previous_share)
            if previous_share_type.SUCCESSOR is None or self.node.tracker.get_height(previous_share.hash) < self.node.net.CHAIN_LENGTH:
                share_type = previous_share_type
            else:
                successor_type = previous_share_type.SUCCESSOR
                counts = p2pool_data.get_desired_version_counts(self.node.tracker,
                    self.node.tracker.get_nth_parent_hash(previous_share.hash, self.node.net.CHAIN_LENGTH*9//10), self.node.net.CHAIN_LENGTH//10)
                upgraded = counts.get(successor_type.VERSION, 0)/sum(counts.itervalues())
                if upgraded > .65:
                    print 'Switchover imminent. Upgraded: %.3f%% Threshold: %.3f%%' % (upgraded*100, 95)
                    print
                # Share -> NewShare only valid if 95% of hashes in [net.CHAIN_LENGTH*9//10, net.CHAIN_LENGTH] for new version
                if counts.get(successor_type.VERSION, 0) > sum(counts.itervalues())*95//100:
                    share_type = successor_type
                else:
                    share_type = previous_share_type
        # Cap the share difficulty so this miner finds at most ~1.67% of the
        # pool's shares, unless the expected payout would fall below the
        # parent network's dust threshold.
        if desired_share_target is None:
            desired_share_target = 2**256-1
            local_hash_rate = self._estimate_local_hash_rate()
            if local_hash_rate is not None:
                desired_share_target = min(desired_share_target,
                    bitcoin_data.average_attempts_to_target(local_hash_rate * self.node.net.SHARE_PERIOD / 0.0167)) # limit to 1.67% of pool shares by modulating share difficulty
            local_addr_rates = self.get_local_addr_rates()
            lookbehind = 3600//self.node.net.SHARE_PERIOD
            block_subsidy = self.node.bitcoind_work.value['subsidy']
            if previous_share is not None and self.node.tracker.get_height(previous_share.hash) > lookbehind:
                expected_payout_per_block = local_addr_rates.get(pubkey_hash, 0)/p2pool_data.get_pool_attempts_per_second(self.node.tracker, self.node.best_share_var.value, lookbehind) \
                    * block_subsidy*(1-self.donation_percentage/100) # XXX doesn't use global stale rate to compute pool hash
                if expected_payout_per_block < self.node.net.PARENT.DUST_THRESHOLD:
                    desired_share_target = min(desired_share_target,
                        bitcoin_data.average_attempts_to_target((bitcoin_data.target_to_average_attempts(self.node.bitcoind_work.value['bits'].target)*self.node.net.SPREAD)*self.node.net.PARENT.DUST_THRESHOLD/block_subsidy)
                    )
        if True:
            # Build the generation (coinbase) transaction and share metadata.
            share_info, gentx, other_transaction_hashes, get_share = share_type.generate_transaction(
                tracker=self.node.tracker,
                share_data=dict(
                    previous_share_hash=self.node.best_share_var.value,
                    coinbase=(script.create_push_script([
                        self.current_work.value['height'],
                    ] + ([mm_data] if mm_data else []) + [
                    ]) + self.current_work.value['coinbaseflags'])[:100],
                    nonce=random.randrange(2**32),
                    pubkey_hash=pubkey_hash,
                    subsidy=self.current_work.value['subsidy'],
                    donation=math.perfect_round(65535*self.donation_percentage/100),
                    stale_info=(lambda (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain):
                        'orphan' if orphans > orphans_recorded_in_chain else
                        'doa' if doas > doas_recorded_in_chain else
                        None
                    )(*self.get_stale_counts()),
                    desired_version=(share_type.SUCCESSOR if share_type.SUCCESSOR is not None else share_type).VOTING_VERSION,
                ),
                block_target=self.current_work.value['bits'].target,
                desired_timestamp=int(time.time() + 0.5),
                desired_target=desired_share_target,
                ref_merkle_link=dict(branch=[], index=0),
                desired_other_transaction_hashes_and_fees=zip(tx_hashes, self.current_work.value['transaction_fees']),
                net=self.node.net,
                known_txs=tx_map,
                base_subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.current_work.value['height']),
            )
        packed_gentx = bitcoin_data.tx_type.pack(gentx)
        other_transactions = [tx_map[tx_hash] for tx_hash in other_transaction_hashes]
        # Merged chains that asked for 'p2pool' difficulty inherit the share target.
        mm_later = [(dict(aux_work, target=aux_work['target'] if aux_work['target'] != 'p2pool' else share_info['bits'].target), index, hashes) for aux_work, index, hashes in mm_later]
        # Pseudoshare target: aim for ~1 submission per second, but never
        # easier than the share target or any merged-mining target.
        if desired_pseudoshare_target is None:
            target = 2**256-1
            local_hash_rate = self._estimate_local_hash_rate()
            if local_hash_rate is not None:
                target = min(target,
                    bitcoin_data.average_attempts_to_target(local_hash_rate * 1)) # limit to 1 share response every second by modulating pseudoshare difficulty
        else:
            target = desired_pseudoshare_target
        target = max(target, share_info['bits'].target)
        for aux_work, index, hashes in mm_later:
            target = max(target, aux_work['target'])
        target = math.clip(target, self.node.net.PARENT.SANE_TARGET_RANGE)
        getwork_time = time.time()
        lp_count = self.new_work_event.times
        merkle_link = bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0)
        print 'New work for worker! Difficulty: %.06f Share difficulty: %.06f Total block value: %.6f %s including %i transactions' % (
            bitcoin_data.target_to_difficulty(target),
            bitcoin_data.target_to_difficulty(share_info['bits'].target),
            self.current_work.value['subsidy']*1e-8, self.node.net.PARENT.SYMBOL,
            len(self.current_work.value['transactions']),
        )
        # Block template handed to the miner; coinb1/coinb2 bracket the
        # miner-controlled coinbase nonce bytes.
        ba = dict(
            version=min(self.current_work.value['version'], 2),
            previous_block=self.current_work.value['previous_block'],
            merkle_link=merkle_link,
            coinb1=packed_gentx[:-self.COINBASE_NONCE_LENGTH-4],
            coinb2=packed_gentx[-4:],
            timestamp=self.current_work.value['time'],
            bits=self.current_work.value['bits'],
            share_target=target,
        )
        received_header_hashes = set()
        def got_response(header, user, coinbase_nonce):
            # Reassemble the full generation tx with the miner's nonce bytes.
            assert len(coinbase_nonce) == self.COINBASE_NONCE_LENGTH
            new_packed_gentx = packed_gentx[:-self.COINBASE_NONCE_LENGTH-4] + coinbase_nonce + packed_gentx[-4:] if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else packed_gentx
            new_gentx = bitcoin_data.tx_type.unpack(new_packed_gentx) if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else gentx
            header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header))
            pow_hash = self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(header))
            try:
                # Found a full block (or DEBUG enabled): hand it to bitcoind.
                if pow_hash <= header['bits'].target or p2pool.DEBUG:
                    helper.submit_block(dict(header=header, txs=[new_gentx] + other_transactions), False, self.node.factory, self.node.bitcoind, self.node.bitcoind_work, self.node.net)
                    if pow_hash <= header['bits'].target:
                        print
                        print 'GOT BLOCK FROM MINER! Passing to bitcoind! %s%064x' % (self.node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX, header_hash)
                        print
            except:
                log.err(None, 'Error while processing potential block:')
            user, _, _, _ = self.get_user_details(user)
            assert header['previous_block'] == ba['previous_block']
            assert header['merkle_root'] == bitcoin_data.check_merkle_link(bitcoin_data.hash256(new_packed_gentx), merkle_link)
            assert header['bits'] == ba['bits']
            on_time = self.new_work_event.times == lp_count
            # Submit the PoW to every merged chain whose target it meets.
            for aux_work, index, hashes in mm_later:
                try:
                    if pow_hash <= aux_work['target'] or p2pool.DEBUG:
                        df = deferral.retry('Error submitting merged block: (will retry)', 10, 10)(aux_work['merged_proxy'].rpc_getauxblock)(
                            pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
                            bitcoin_data.aux_pow_type.pack(dict(
                                merkle_tx=dict(
                                    tx=new_gentx,
                                    block_hash=header_hash,
                                    merkle_link=merkle_link,
                                ),
                                merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
                                parent_block_header=header,
                            )).encode('hex'),
                        )
                        @df.addCallback
                        def _(result, aux_work=aux_work):
                            if result != (pow_hash <= aux_work['target']):
                                print >>sys.stderr, 'Merged block submittal result: %s Expected: %s' % (result, pow_hash <= aux_work['target'])
                            else:
                                print 'Merged block submittal result: %s' % (result,)
                        @df.addErrback
                        def _(err):
                            log.err(err, 'Error submitting merged block:')
                except:
                    log.err(None, 'Error while processing merged mining POW:')
            # Good enough for a p2pool share (and not a duplicate): record it.
            if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes:
                last_txout_nonce = pack.IntType(8*self.COINBASE_NONCE_LENGTH).unpack(coinbase_nonce)
                share = get_share(header, last_txout_nonce)
                print 'GOT SHARE! %s %s prev %s age %.2fs%s' % (
                    user,
                    p2pool_data.format_hash(share.hash),
                    p2pool_data.format_hash(share.previous_hash),
                    time.time() - getwork_time,
                    ' DEAD ON ARRIVAL' if not on_time else '',
                )
                self.my_share_hashes.add(share.hash)
                if not on_time:
                    self.my_doa_share_hashes.add(share.hash)
                self.node.tracker.add(share)
                self.node.set_best_share()
                try:
                    if (pow_hash <= header['bits'].target or p2pool.DEBUG) and self.node.p2p_node is not None:
                        self.node.p2p_node.broadcast_share(share.hash)
                except:
                    log.err(None, 'Error forwarding block solution:')
                self.share_received.happened(bitcoin_data.target_to_average_attempts(share.target), not on_time, share.hash)
            # Account the pseudoshare (or reject too-easy / duplicate work).
            if pow_hash > target:
                print 'Worker %s submitted share with hash > target:' % (user,)
                print ' Hash: %56x' % (pow_hash,)
                print ' Target: %56x' % (target,)
            elif header_hash in received_header_hashes:
                print >>sys.stderr, 'Worker %s submitted share more than once!' % (user,)
            else:
                received_header_hashes.add(header_hash)
                self.pseudoshare_received.happened(bitcoin_data.target_to_average_attempts(target), not on_time, user)
                self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target)))
                while len(self.recent_shares_ts_work) > 50:
                    self.recent_shares_ts_work.pop(0)
                self.local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=user, share_target=share_info['bits'].target))
                self.local_addr_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), pubkey_hash=pubkey_hash))
            return on_time
        return ba, got_response
| gpl-3.0 |
iohannez/gnuradio | gr-filter/python/filter/qa_pfb_interpolator.py | 7 | 2724 | #!/usr/bin/env python
#
# Copyright 2012-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
from gnuradio import gr, gr_unittest, filter, blocks
import math
def sig_source_c(samp_rate, freq, amp, N):
    """Generate N samples of a complex exponential tone.

    samp_rate: sampling rate in Hz.
    freq: tone frequency in Hz.
    amp: linear amplitude applied to every sample.  Bug fix: this
         parameter was previously accepted but ignored, so the tone was
         always generated with unit amplitude; behaviour is unchanged
         for the existing amp=1 call sites.
    N: number of samples to produce.
    Returns a list of complex values amp * (cos(2*pi*f*t) + 1j*sin(2*pi*f*t)).
    """
    t = [float(x) / samp_rate for x in range(N)]
    return [amp * (math.cos(2.*math.pi*freq*x) +
                   1j*math.sin(2.*math.pi*freq*x)) for x in t]
class test_pfb_interpolator(gr_unittest.TestCase):
    """QA test comparing the PFB interpolator's output to an ideal tone."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_000(self):
        n_samples = 1000            # number of input samples to use
        n_channels = 5              # number of channels / interpolation factor
        in_rate = 1000              # baseband sampling rate
        out_rate = n_channels * in_rate   # output sample rate of the interpolator

        taps = filter.firdes.low_pass_2(n_channels, out_rate, in_rate / 4, in_rate / 10,
                                        attenuation_dB=80,
                                        window=filter.firdes.WIN_BLACKMAN_hARRIS)

        freq = 123.456
        src = blocks.vector_source_c(sig_source_c(in_rate, freq, 1, n_samples))
        interp = filter.pfb_interpolator_ccf(n_channels, taps)
        sink = blocks.vector_sink_c()

        self.tb.connect(src, interp)
        self.tb.connect(interp, sink)
        self.tb.run()

        n_check = 50
        actual = sink.data()
        # Phase rotation introduced through the filter bank
        phase = 4.8870112969978994
        # Time scale at the interpolated rate
        t = [float(k) / out_rate for k in range(len(actual))]
        # Ideal tone at the input frequency, rotated by the filter phase
        expected = [math.cos(2.*math.pi*freq*x+phase) +
                    1j*math.sin(2.*math.pi*freq*x+phase) for x in t]

        self.assertComplexTuplesAlmostEqual(expected[-n_check:], actual[-n_check:], 4)
if __name__ == '__main__':
    # Run through GNU Radio's unittest driver, which also writes XML results.
    gr_unittest.run(test_pfb_interpolator, "test_pfb_interpolator.xml")
| gpl-3.0 |
ARM-software/lisa | lisa/generic.py | 2 | 5097 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Generic types inspired by the :mod:`typing` module.
"""
import functools
from collections.abc import Mapping, Sequence
from operator import attrgetter
from lisa.utils import sphinx_register_nitpick_ignore
class GenericContainerMetaBase(type):
    """
    Base class for the metaclass of generic containers.

    They are parameterized with the ``type_`` class attribute, and classes can
    also be created by indexing on classes with :class:`GenericContainerBase`
    metaclass. The ``type_`` class attribute will be set with what is passed as
    the key.
    """
    def __instancecheck__(cls, instance):
        # Delegate to instancecheck(), translating its TypeError into False.
        try:
            cls.instancecheck(instance)
        except TypeError:
            return False
        return True

    # Fully memoize the function so that this always holds:
    # assert Container[Foo] is Container[Foo]
    @functools.lru_cache(maxsize=None, typed=True)
    def __getitem__(cls, type_):
        class _Parameterized(cls):
            _type = type_

        type_params = type_ if isinstance(type_, Sequence) else [type_]

        def _dotted(t):
            # Qualify non-builtin parameters with their module so Sphinx
            # can establish cross references.
            if t.__module__ == 'builtins':
                return t.__qualname__
            return f'{t.__module__}.{t.__qualname__}'

        _Parameterized.__name__ = '{}[{}]'.format(
            cls.__name__,
            ','.join(t.__name__ for t in type_params),
        )
        _Parameterized.__qualname__ = '{}[{}]'.format(
            cls.__qualname__,
            ','.join(_dotted(t) for t in type_params),
        )
        _Parameterized.__module__ = cls.__module__
        # The synthesized name is not resolvable, so keep Sphinx from
        # emitting cross-reference warnings about it.
        sphinx_register_nitpick_ignore(_Parameterized)
        return _Parameterized
class GenericContainerBase:
    """
    Base class for generic containers.

    Instantiation eagerly validates the new container through the
    metaclass' ``instancecheck`` hook, which raises :exc:`TypeError`
    on non-conforming content.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Validate the freshly-built container against the parameter type.
        type(self).instancecheck(self)
class GenericMappingMeta(GenericContainerMetaBase, type(Mapping)):
    """
    Metaclass for generic mapping containers.

    It provides an ``__instancecheck__`` implementation that checks the type
    of the keys and values. This makes it suitable for input sanitizing based
    on type checking.
    """
    def instancecheck(cls, instance):
        if not isinstance(instance, Mapping):
            raise TypeError('not a Mapping')
        key_type, value_type = cls._type
        for key, value in instance.items():
            if not isinstance(key, key_type):
                raise TypeError(f'Key "{key}" of type {type(key).__qualname__} should be of type {key_type.__qualname__}', key)
            if not isinstance(value, value_type):
                raise TypeError(f'Value of {type(value).__qualname__} key "{key}" should be of type {value_type.__qualname__}', key)
class TypedDict(GenericContainerBase, dict, metaclass=GenericMappingMeta):
    """
    Subclass of dict providing keys and values type check.

    Parameterize by indexing with a ``(key_type, value_type)`` pair, e.g.
    ``TypedDict[str, int]``; instantiating the resulting class with
    non-conforming keys or values raises :exc:`TypeError`.
    """
    pass
class GenericSequenceMeta(GenericContainerMetaBase, type(Sequence)):
    """Similar to :class:`GenericMappingMeta` for sequences"""
    def instancecheck(cls, instance):
        if not isinstance(instance, Sequence):
            raise TypeError('not a Sequence')
        item_type = cls._type
        for position, item in enumerate(instance):
            if not isinstance(item, item_type):
                raise TypeError(f'Item #{position} "{item}" of type {type(item).__qualname__} should be of type {item_type.__qualname__}', position)
class GenericSortedSequenceMeta(GenericSequenceMeta):
    """Like :class:`GenericSequenceMeta`, additionally requiring the items
    to be sorted in ascending order."""
    def instancecheck(cls, instance):
        super().instancecheck(instance)
        # Every adjacent pair must be non-decreasing.
        for idx, (current, following) in enumerate(zip(instance, instance[1:])):
            if current > following:
                raise TypeError(f'Item #{idx} "{current}" is higher than the next item "{following}", but the list must be sorted')
class TypedList(GenericContainerBase, list, metaclass=GenericSequenceMeta):
    """
    Subclass of list providing item type check.

    Parameterize by indexing with the item type, e.g. ``TypedList[int]``;
    instantiating the resulting class with non-conforming items raises
    :exc:`TypeError`.
    """
    pass
class SortedTypedList(GenericContainerBase, list, metaclass=GenericSortedSequenceMeta):
    """
    Subclass of list providing item type check, and also checking that the
    list is sorted in ascending order.
    """
    pass
| apache-2.0 |
mantidproject/mantid | scripts/test/Muon/quick_edit/quick_edit_presenter_test.py | 3 | 3612 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from Muon.GUI.Common.plot_widget.quick_edit.quick_edit_view import QuickEditView
from Muon.GUI.Common.plot_widget.quick_edit.quick_edit_presenter import QuickEditPresenter
from Muon.GUI.Common.contexts.plotting_context import PlottingContext
from mantid.simpleapi import AnalysisDataService
from mantidqt.utils.qt.testing import start_qapplication
@start_qapplication
class QuickEditTest(unittest.TestCase):
    """Unit tests for QuickEditPresenter wired to a real QuickEditView."""
    def setUp(self):
        self.view = QuickEditView(None, None)
        self.context = PlottingContext()
        self.presenter = QuickEditPresenter(self.view, self.context)

    def tearDown(self):
        AnalysisDataService.Instance().clear()

    def add_plots(self, num_plots):
        # Populate the selector with "Plot 1" .. "Plot <num_plots>".
        for index in range(num_plots):
            self.view.plot_selector.addItem(f"Plot {index + 1}")

    def test_add_subplot(self):
        self.view.set_selection = mock.Mock()
        self.assertEqual(1, self.view.plot_selector.count())
        self.presenter.add_subplot("Plot 1")
        self.assertEqual(2, self.view.plot_selector.count())
        self.view.set_selection.assert_called_once_with(0)  # Selection should not change

    def test_clear_subplots(self):
        self.add_plots(3)
        self.presenter.clear_subplots()
        # Only the built-in "All" entry survives a clear.
        self.assertEqual(1, self.view.plot_selector.count())

    def test_all(self):
        self.add_plots(3)
        self.assertEqual([f"Plot {i}" for i in (1, 2, 3)], self.presenter.get_all_subplots)

    def test_set_errors_same_as_previous(self):
        self.view.get_errors = mock.Mock(return_value=False)
        self.view.set_errors = mock.Mock()
        self.presenter.set_errors(False)
        # Nothing changed, so the view must not be updated.
        self.view.set_errors.assert_not_called()

    def test_set_errors_not_same_as_previous(self):
        self.view.get_errors = mock.Mock(return_value=False)
        self.view.set_errors = mock.Mock()
        self.presenter.set_errors(True)
        self.view.set_errors.assert_called_once_with(True)

    def test_get_selection_all(self):
        self.add_plots(2)
        self.view.current_selection = mock.Mock(return_value="All")
        # "All" expands to every individual plot.
        self.assertEqual(["Plot 1", "Plot 2"], self.presenter.get_selection())

    def test_get_selection(self):
        self.add_plots(2)
        self.view.current_selection = mock.Mock(return_value="Plot 1")
        self.assertEqual(["Plot 1"], self.presenter.get_selection())

    def test_remove_subplot(self):
        self.add_plots(2)
        self.view.plot_selector.currentText = mock.Mock(return_value="Plot 2")
        self.view.set_selection = mock.Mock()
        self.view.rm_subplot = mock.Mock()
        self.presenter.rm_subplot("Plot 1")
        self.view.rm_subplot.assert_called_once_with(1)

    def test_remove_current_subplot(self):
        self.add_plots(2)
        self.view.current_selection = mock.Mock(return_value="Plot 2")
        self.view.set_selection = mock.Mock()
        self.view.rm_subplot = mock.Mock()
        self.presenter.rm_subplot("Plot 2")
        self.view.rm_subplot.assert_called_once_with(2)
        self.view.set_selection.assert_called_once_with(0)  # Set to all
if __name__ == '__main__':
    # buffer=False keeps test stdout visible; verbosity=2 lists each test case.
    unittest.main(buffer=False, verbosity=2)
| gpl-3.0 |
sexroute/commandergenius | project/jni/python/src/Tools/scripts/nm2def.py | 94 | 2444 | #! /usr/bin/env python
"""nm2def.py
Helpers to extract symbols from Unix libs and auto-generate
Windows definition files from them. Depends on nm(1). Tested
on Linux and Solaris only (-p option to nm is for Solaris only).
By Marc-Andre Lemburg, Aug 1998.
Additional notes: the output of nm is supposed to look like this:
acceler.o:
000001fd T PyGrammar_AddAccelerators
U PyGrammar_FindDFA
00000237 T PyGrammar_RemoveAccelerators
U _IO_stderr_
U exit
U fprintf
U free
U malloc
U printf
grammar1.o:
00000000 T PyGrammar_FindDFA
00000034 T PyGrammar_LabelRepr
U _PyParser_TokenNames
U abort
U printf
U sprintf
...
Even if this isn't the default output of your nm, there is generally an
option to produce this format (since it is the original v7 Unix format).
"""
import os, sys

# Name of the static Python library on Unix, e.g. 'libpython3.11.a'.
# Bug fix: use sys.version_info rather than slicing sys.version --
# 'sys.version[:3]' yields '3.1' for Python 3.10+.
PYTHONLIB = 'libpython%d.%d.a' % sys.version_info[:2]
# Name of the Python DLL on Windows, e.g. 'Python311.dll'; same fix as above.
PC_PYTHONLIB = 'Python%d%d.dll' % sys.version_info[:2]
# nm invocation template: -g lists external symbols only; -p skips sorting
# (the -p option is for Solaris only).
NM = 'nm -p -g %s'			# For Linux, use "nm -g %s"
def symbols(lib=PYTHONLIB, types=('T', 'C', 'D')):
    """Run nm(1) on *lib* and return {symbol_name: (address, type)}.

    Only symbols whose nm type code is in *types* are kept; blank lines,
    per-object-file headers ("foo.o:") and records that do not consist of
    exactly 'address type name' are skipped.
    """
    output = os.popen(NM % lib).readlines()
    table = {}
    for raw in output:
        record = raw.strip()
        # Skip blank lines and object-file header lines.
        if not record or ':' in record:
            continue
        fields = record.split()
        if len(fields) != 3:
            continue
        address, symtype, name = fields
        if symtype in types:
            table[name] = (address, symtype)
    return table
def export_list(symbols):
    """Render a symbol table (as returned by symbols()) into the body of a
    .def file: DATA-annotated entries first, then plain code symbols, each
    group sorted alphabetically and tab-indented."""
    data_syms = []
    code_syms = []
    for name, (address, symtype) in symbols.items():
        target = data_syms if symtype in ('C', 'D') else code_syms
        target.append('\t' + name)
    data_syms.sort()
    # The trailing empty entry makes the last DATA line end in ' DATA\n' too.
    data_syms.append('')
    code_syms.sort()
    return ' DATA\n'.join(data_syms) + '\n' + '\n'.join(code_syms)
# Template for the generated module-definition (.def) file; %s receives
# the symbol list produced by export_list().
DEF_TEMPLATE = """\
EXPORTS
%s
"""

# Special symbols that have to be included even though they don't
# pass the Py/_Py prefix filter below.
SPECIALS = (
    )

def filter_Python(symbols, specials=SPECIALS):
    """Remove (in place) every symbol that is not part of the Python C API.

    Keeps names starting with 'Py' or '_Py', plus any listed in *specials*.
    Bug fix: iterate over a snapshot of the keys -- deleting from a dict
    while iterating its live keys() view raises RuntimeError on Python 3
    (on Python 2, keys() returned a fresh list, which hid the problem).
    """
    for name in list(symbols.keys()):
        if name[:2] == 'Py' or name[:3] == '_Py':
            continue
        if name not in specials:
            del symbols[name]
def main():
    """Extract the Python C API symbols from PYTHONLIB and print the
    resulting .def file on stdout."""
    table = symbols(PYTHONLIB)
    filter_Python(table)
    body = export_list(table)
    out = sys.stdout  # open('PC/python_nt.def','w')
    out.write(DEF_TEMPLATE % (body))
    out.close()
main()
| lgpl-2.1 |
amishb/youtube-dl | test/test_age_restriction.py | 171 | 1379 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import try_rm
from youtube_dl import YoutubeDL
def _download_restricted(url, filename, age):
    """ Returns true if the file has been downloaded """
    ydl = YoutubeDL({
        'age_limit': age,
        'skip_download': True,
        'writeinfojson': True,
        'outtmpl': '%(id)s.%(ext)s',
    })
    ydl.add_default_info_extractors()
    json_filename = os.path.splitext(filename)[0] + '.info.json'
    # Make sure a stale info file from a previous run cannot fake a success.
    try_rm(json_filename)
    ydl.download([url])
    downloaded = os.path.exists(json_filename)
    try_rm(json_filename)
    return downloaded
class TestAgeRestriction(unittest.TestCase):
    """Checks that age-restricted videos obey the configured age limit."""
    def _assert_restricted(self, url, filename, age, old_age=None):
        # The download must succeed under the permissive limit and be
        # refused under the restrictive one.
        for limit, should_download in ((old_age, True), (age, False)):
            self.assertEqual(_download_restricted(url, filename, limit), should_download)

    def test_youtube(self):
        self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10)

    def test_youporn(self):
        self._assert_restricted(
            'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
            '505835.mp4', 2, old_age=25)
if __name__ == '__main__':
    # These tests hit the network; run this file directly for manual checking.
    unittest.main()
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.