repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
sobercoder/gem5 | util/stats/chart.py | 90 | 3507 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
class ChartOptions(object):
    """Bundle of chart-rendering options with class-level defaults.

    Attribute reads fall back from per-instance overrides (``self.options``)
    to the class-wide ``defaults`` dict; attribute writes for any key listed
    in ``defaults`` are stored as instance overrides.  Unknown option names
    raise AttributeError on read and on explicit update().
    """
    # Class-wide fallback values.  Instances store only explicit overrides,
    # so copying a ChartOptions copies just the overrides.
    defaults = { 'chart_size' : (8, 4),
                 'figure_size' : [0.1, 0.1, 0.6, 0.85],
                 'title' : None,
                 'fig_legend' : True,
                 'legend' : None,
                 'legend_loc' : 'upper right',
                 'legend_size' : 6,
                 'colormap' : 'jet',
                 'xlabel' : None,
                 'ylabel' : None,
                 'xticks' : None,
                 'xsubticks' : None,
                 'yticks' : None,
                 'ylim' : None,
                 }
    def __init__(self, options=None, **kwargs):
        self.init(options, **kwargs)
    def clear(self):
        """Drop all instance-level overrides."""
        # Stored via __setattr__'s fall-through branch, since 'options'
        # itself is not a chart option name.
        self.options = {}
    def init(self, options=None, **kwargs):
        """Reset to defaults, then apply 'options' and keyword overrides."""
        self.clear()
        self.update(options, **kwargs)
    def update(self, options=None, **kwargs):
        """Merge overrides from another ChartOptions and/or keywords.

        Raises AttributeError if 'options' is not a ChartOptions instance
        or a keyword is not a known option name.
        """
        if options is not None:
            if not isinstance(options, ChartOptions):
                # raise Exc(msg) instead of the Python 2-only
                # `raise Exc, msg` form; message text is unchanged.
                raise AttributeError(
                    'attribute options of type %s should be %s' %
                    (type(options), ChartOptions))
            self.options.update(options.options)
        # items() instead of the Python 2-only iteritems(); behavior is
        # identical and this also runs under Python 3.
        for key, value in kwargs.items():
            if key not in ChartOptions.defaults:
                raise AttributeError(
                    "%s instance has no attribute '%s'" % (type(self), key))
            self.options[key] = value
    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails: check instance
        # overrides first, then the class-wide defaults.
        if attr in self.options:
            return self.options[attr]
        if attr in ChartOptions.defaults:
            return ChartOptions.defaults[attr]
        raise AttributeError(
            "%s instance has no attribute '%s'" % (type(self), attr))
    def __setattr__(self, attr, value):
        # Known option names go into the per-instance override dict; all
        # other attributes (e.g. 'options' itself) are set normally.
        if attr in ChartOptions.defaults:
            self.options[attr] = value
        else:
            super(ChartOptions, self).__setattr__(attr, value)
| bsd-3-clause |
systers/hyperkitty | hyperkitty/context_processors.py | 1 | 1285 | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2017 by the Free Software Foundation, Inc.
#
# This file is part of HyperKitty.
#
# HyperKitty is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# HyperKitty is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# HyperKitty. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aamir Khan <syst3m.w0rm@gmail.com>
# Author: Aurelien Bompard <abompard@fedoraproject.org>
#
from django.conf import settings
from hyperkitty import VERSION
def common(request):
    """Context processor: gather the extra template variables for a request.

    Currently this is just the exported Django settings (see
    export_settings), but the dict is built separately so further groups
    of variables can be merged in later.
    """
    context = {}
    context.update(export_settings(request))
    return context
def export_settings(request):
    """Expose a whitelist of Django settings to templates.

    Each exported setting becomes a lower-cased context key (missing
    settings map to None); the running HyperKitty version is added under
    ``HYPERKITTY_VERSION``.
    """
    exported_names = ["USE_MOCKUPS"]
    context = {name.lower(): getattr(settings, name, None)
               for name in exported_names}
    context["HYPERKITTY_VERSION"] = VERSION
    return context
| gpl-3.0 |
stevecassidy/graf-python | src/tests/test_graph.py | 1 | 7311 | # -*- coding: utf-8 -*-
#
# Poio Tools for Linguists
#
# Copyright (C) 2009-2012 Poio Project
# Author: António Lopes <alopes@cidles.eu>
# URL: <http://www.cidles.eu/ltll/poio>
# For license information, see LICENSE.TXT
"""This module contains the tests to the class
AnnotationSpace, Edge, Graph, Node and Region.
This test serves to ensure the viability of the
methods of the classes.
"""
from graf import Graph, AnnotationSpace, Annotation, Node, Edge, Region
import unittest
class TestGraph(unittest.TestCase):
    """Exercise creation and lookup of GrAF members on a Graph:
    annotation spaces, nodes, edges, regions, features and roots.
    """

    def setUp(self):
        self.graph = Graph()

    def test_create_annotation_space(self):
        space = self.graph.annotation_spaces.create('as_id')
        assert space.as_id == 'as_id'
        assert list(self.graph.annotation_spaces) == [space]

    def test_add_annotation_space(self):
        space = AnnotationSpace('as_id')
        self.graph.annotation_spaces.add(space)
        assert self.graph.annotation_spaces['as_id'] == space

    def test_add_edge(self):
        source = Node('node_1')
        target = Node('node_2')
        edge = Edge('id_test', source, target)
        self.graph.nodes.add(source)
        self.graph.nodes.add(target)
        self.graph.edges.add(edge)
        assert list(self.graph.edges)[0] == edge

    def test_create_edge(self):
        source = Node('node_1')
        target = Node('node_2')
        self.graph.create_edge(source, target, id='3')
        assert len(self.graph.edges) == 1
        assert self.graph.edges['3'].from_node == source
        assert self.graph.edges['3'].to_node == target

    def test_add_feature(self):
        self.graph.features['feature'] = 'value'
        assert self.graph.features['feature'] == 'value'

    def test_add_node(self):
        node = Node('test_node')
        self.graph.nodes.add(node)
        assert list(self.graph.nodes) == [node]

    def test_add_region(self):
        # A Region needs at least two anchors.
        region = Region('1', 'anchor1', 'anchor2')
        self.graph.regions.add(region)
        assert list(self.graph.regions) == [region]

    def test_get_edge_by_id(self):
        source = Node('node_1')
        target = Node('node_2')
        edge = Edge('id_test', source, target)
        self.graph.nodes.add(source)
        self.graph.nodes.add(target)
        self.graph.edges.add(edge)
        assert self.graph.edges['id_test'] == edge

    def test_get_edge_by_nodes(self):
        source = Node('node_1')
        target = Node('node_2')
        edge = Edge('id_test', source, target)
        self.graph.nodes.add(source)
        self.graph.nodes.add(target)
        self.graph.edges.add(edge)
        # Lookup works both by node objects and by their ids.
        assert self.graph.find_edge(source, target) == edge
        assert self.graph.find_edge(source.id, target.id) == edge

    def test_get_node(self):
        node = Node('test_node')
        self.graph.nodes.add(node)
        assert self.graph.nodes['test_node'] == node

    def test_get_region(self):
        # NOTE(review): this duplicates test_get_node (it looks up a node,
        # not a region) -- kept behaviorally identical to the original.
        node = Node('test_node')
        self.graph.nodes.add(node)
        assert self.graph.nodes['test_node'] == node

    def test_get_annotation_space(self):
        space = AnnotationSpace('as_id')
        self.graph.annotation_spaces.add(space)
        assert self.graph.annotation_spaces['as_id'] == space

    def test_get_region_from_id(self):
        region = Region('1', 'anchor1', 'anchor2')
        self.graph.regions.add(region)
        assert self.graph.regions['1'] == region

    def test_get_region_from_anchors(self):
        region = Region('1', 'anchor1', 'anchor2')
        self.graph.regions.add(region)
        assert self.graph.get_region('anchor1', 'anchor2') == region

    def test_get_root(self):
        root = Node('test_node')
        self.graph.nodes.add(root)
        self.graph.root = root
        assert self.graph.root == root

    def test_iter_roots(self):
        root = Node('test_node')
        self.graph.nodes.add(root)
        self.graph.root = root
        assert list(self.graph.iter_roots()) == [root]

    def test_parents_and_children(self):
        all_nodes = [Node('n%d' % i) for i in range(1, 5)]
        n1, n2, n3, n4 = all_nodes
        for node in all_nodes:
            self.graph.nodes.add(node)
        self.graph.create_edge(n1, n2)
        self.graph.create_edge(n2, n1)
        self.graph.create_edge(n1, n3)
        self.graph.create_edge(n3, n4)
        assert list(n1.iter_children()) == [n2, n3]
        assert list(n2.iter_children()) == [n1]
        assert list(n3.iter_children()) == [n4]
        assert list(n4.iter_children()) == []
        assert list(n1.iter_parents()) == [n2]
        assert list(n2.iter_parents()) == [n1]
        assert list(n3.iter_parents()) == [n1]
        assert list(n4.iter_parents()) == [n3]

    # TODO: Test makes wrong assumption. The problem is not that
    # Annotations might get added twice, but that one file might
    # be parsed twice.
    # def test_verify_annotation_existence(self):
    #     """ Verification if the same annotation is parsed
    #     more then one time. The same annotation can only
    #     exist and allowed to be added one time.
    #     """
    #     node = Node('test_node')
    #     annotation_1 = Annotation('annotation_value', None, 'id-1')
    #     # Same id
    #     annotation_2 = Annotation('annotation_value', None, 'id-1')
    #     node.annotations.add(annotation_1)
    #     # Try to add again the same annotation
    #     node.annotations.add(annotation_2)
    #     self.graph.nodes.add(node)
    #     element = self.graph.get_element('test_node')
    #     assert(len(element.annotations) == 1)

    # TODO: Test makes wrong assumption. The problem is not that
    # Annotations might get added twice, but that one file might
    # be parsed twice.
    # def test_verify_edge_existence(self):
    #     """ Verification if the same edge is parsed
    #     more then one time. The same edge can only
    #     exist and allowed to be added one time.
    #     """
    #     fnode = Node('node_1')  # From Node
    #     tnode = Node('node_2')  # To Node
    #     edge_1 = Edge('id_test', fnode, tnode)
    #     # Same id
    #     edge_2 = Edge('id_test', fnode, tnode)
    #     self.graph.nodes.add(fnode)
    #     self.graph.nodes.add(tnode)
    #     self.graph.edges.add(edge_1)
    #     # Try to add again the edge annotation
    #     self.graph.edges.add(edge_2)
    #     assert(len(self.graph.edges) == 1)
| apache-2.0 |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/distutils/command/sdist.py | 5 | 17860 | """distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: sdist.py 38697 2005-03-23 18:54:36Z loewis $"
import sys, os, string
from types import *
from glob import glob
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import *
from distutils.filelist import FileList
from distutils import log
def show_formats():
    """Print all possible values for the 'formats' option (used by
    the "--help-formats" command-line option).
    """
    # Local imports keep module load cheap; this helper is only invoked
    # for --help-formats.
    from distutils.fancy_getopt import FancyGetopt
    from distutils.archive_util import ARCHIVE_FORMATS
    entries = [("formats=" + name, None, ARCHIVE_FORMATS[name][2])
               for name in ARCHIVE_FORMATS.keys()]
    entries.sort()
    FancyGetopt(entries).print_help(
        "List of available source distribution formats:")
class sdist (Command):
    """Distutils command: build the file manifest (from MANIFEST /
    MANIFEST.in / defaults) and pack the listed files into one or more
    source archive files.  Kept compatible with Python 2.1, hence the
    old-style ``raise Exc, value`` syntax and the ``string`` module.
    """
    description = "create a source distribution (tarball, zip file, etc.)"
    user_options = [
        ('template=', 't',
         "name of manifest template file [default: MANIFEST.in]"),
        ('manifest=', 'm',
         "name of manifest file [default: MANIFEST]"),
        ('use-defaults', None,
         "include the default file set in the manifest "
         "[default; disable with --no-defaults]"),
        ('no-defaults', None,
         "don't include the default file set"),
        ('prune', None,
         "specifically exclude files/directories that should not be "
         "distributed (build tree, RCS/CVS dirs, etc.) "
         "[default; disable with --no-prune]"),
        ('no-prune', None,
         "don't automatically exclude anything"),
        ('manifest-only', 'o',
         "just regenerate the manifest and then stop "
         "(implies --force-manifest)"),
        ('force-manifest', 'f',
         "forcibly regenerate the manifest and carry on as usual"),
        ('formats=', None,
         "formats for source distribution (comma-separated list)"),
        ('keep-temp', 'k',
         "keep the distribution tree around after creating " +
         "archive file(s)"),
        ('dist-dir=', 'd',
         "directory to put the source distribution archive(s) in "
         "[default: dist]"),
        ]
    boolean_options = ['use-defaults', 'prune',
                       'manifest-only', 'force-manifest',
                       'keep-temp']
    help_options = [
        ('help-formats', None,
         "list available distribution formats", show_formats),
        ]
    # Map the --no-* negative options onto the flags they clear.
    negative_opt = {'no-defaults': 'use-defaults',
                    'no-prune': 'prune' }
    # Default archive format per OS family when --formats is not given.
    default_format = { 'posix': 'gztar',
                       'nt': 'zip' }
    def initialize_options (self):
        # 'template' and 'manifest' are, respectively, the names of
        # the manifest template and manifest file.
        self.template = None
        self.manifest = None
        # 'use_defaults': if true, we will include the default file set
        # in the manifest
        self.use_defaults = 1
        self.prune = 1
        self.manifest_only = 0
        self.force_manifest = 0
        self.formats = None
        self.keep_temp = 0
        self.dist_dir = None
        # Filled in by make_distribution(); queried via get_archive_files().
        self.archive_files = None
    def finalize_options (self):
        if self.manifest is None:
            self.manifest = "MANIFEST"
        if self.template is None:
            self.template = "MANIFEST.in"
        self.ensure_string_list('formats')
        if self.formats is None:
            try:
                self.formats = [self.default_format[os.name]]
            except KeyError:
                raise DistutilsPlatformError, \
                      "don't know how to create source distributions " + \
                      "on platform %s" % os.name
        bad_format = archive_util.check_archive_formats(self.formats)
        if bad_format:
            raise DistutilsOptionError, \
                  "unknown archive format '%s'" % bad_format
        if self.dist_dir is None:
            self.dist_dir = "dist"
    def run (self):
        # 'filelist' contains the list of files that will make up the
        # manifest
        self.filelist = FileList()
        # Ensure that all required meta-data is given; warn if not (but
        # don't die, it's not *that* serious!)
        self.check_metadata()
        # Do whatever it takes to get the list of files to process
        # (process the manifest template, read an existing manifest,
        # whatever).  File list is accumulated in 'self.filelist'.
        self.get_file_list()
        # If user just wanted us to regenerate the manifest, stop now.
        if self.manifest_only:
            return
        # Otherwise, go ahead and create the source distribution tarball,
        # or zipfile, or whatever.
        self.make_distribution()
    def check_metadata (self):
        """Ensure that all required elements of meta-data (name, version,
        URL, (author and author_email) or (maintainer and
        maintainer_email)) are supplied by the Distribution object; warn if
        any are missing.
        """
        metadata = self.distribution.metadata
        missing = []
        for attr in ('name', 'version', 'url'):
            if not (hasattr(metadata, attr) and getattr(metadata, attr)):
                missing.append(attr)
        if missing:
            self.warn("missing required meta-data: " +
                      string.join(missing, ", "))
        if metadata.author:
            if not metadata.author_email:
                self.warn("missing meta-data: if 'author' supplied, " +
                          "'author_email' must be supplied too")
        elif metadata.maintainer:
            if not metadata.maintainer_email:
                self.warn("missing meta-data: if 'maintainer' supplied, " +
                          "'maintainer_email' must be supplied too")
        else:
            self.warn("missing meta-data: either (author and author_email) " +
                      "or (maintainer and maintainer_email) " +
                      "must be supplied")
    # check_metadata ()
    def get_file_list (self):
        """Figure out the list of files to include in the source
        distribution, and put it in 'self.filelist'.  This might involve
        reading the manifest template (and writing the manifest), or just
        reading the manifest, or just using the default file set -- it all
        depends on the user's options and the state of the filesystem.
        """
        # If we have a manifest template, see if it's newer than the
        # manifest; if so, we'll regenerate the manifest.
        template_exists = os.path.isfile(self.template)
        if template_exists:
            template_newer = dep_util.newer(self.template, self.manifest)
        # The contents of the manifest file almost certainly depend on the
        # setup script as well as the manifest template -- so if the setup
        # script is newer than the manifest, we'll regenerate the manifest
        # from the template.  (Well, not quite: if we already have a
        # manifest, but there's no template -- which will happen if the
        # developer elects to generate a manifest some other way -- then we
        # can't regenerate the manifest, so we don't.)
        self.debug_print("checking if %s newer than %s" %
                         (self.distribution.script_name, self.manifest))
        setup_newer = dep_util.newer(self.distribution.script_name,
                                     self.manifest)
        # cases:
        #   1) no manifest, template exists: generate manifest
        #      (covered by 2a: no manifest == template newer)
        #   2) manifest & template exist:
        #      2a) template or setup script newer than manifest:
        #          regenerate manifest
        #      2b) manifest newer than both:
        #          do nothing (unless --force or --manifest-only)
        #   3) manifest exists, no template:
        #      do nothing (unless --force or --manifest-only)
        #   4) no manifest, no template: generate w/ warning ("defaults only")
        # NOTE: 'template_newer' is only bound when the template exists;
        # the short-circuiting 'and' below keeps that safe.
        manifest_outofdate = (template_exists and
                              (template_newer or setup_newer))
        force_regen = self.force_manifest or self.manifest_only
        manifest_exists = os.path.isfile(self.manifest)
        neither_exists = (not template_exists and not manifest_exists)
        # Regenerate the manifest if necessary (or if explicitly told to)
        if manifest_outofdate or neither_exists or force_regen:
            if not template_exists:
                self.warn(("manifest template '%s' does not exist " +
                           "(using default file list)") %
                          self.template)
            self.filelist.findall()
            if self.use_defaults:
                self.add_defaults()
            if template_exists:
                self.read_template()
            if self.prune:
                self.prune_file_list()
            self.filelist.sort()
            self.filelist.remove_duplicates()
            self.write_manifest()
        # Don't regenerate the manifest, just read it in.
        else:
            self.read_manifest()
    # get_file_list ()
    def add_defaults (self):
        """Add all the default files to self.filelist:
          - README or README.txt
          - setup.py
          - test/test*.py
          - all pure Python modules mentioned in setup script
          - all C sources listed as part of extensions or C libraries
            in the setup script (doesn't catch C headers!)
        Warns if (README or README.txt) or setup.py are missing; everything
        else is optional.
        """
        # A tuple entry means "any one of these alternatives will do".
        standards = [('README', 'README.txt'), self.distribution.script_name]
        for fn in standards:
            if type(fn) is TupleType:
                alts = fn
                got_it = 0
                for fn in alts:
                    if os.path.exists(fn):
                        got_it = 1
                        self.filelist.append(fn)
                        break
                if not got_it:
                    self.warn("standard file not found: should have one of " +
                              string.join(alts, ', '))
            else:
                if os.path.exists(fn):
                    self.filelist.append(fn)
                else:
                    self.warn("standard file '%s' not found" % fn)
        optional = ['test/test*.py', 'setup.cfg']
        for pattern in optional:
            files = filter(os.path.isfile, glob(pattern))
            if files:
                self.filelist.extend(files)
        # Pull in source files known to the build_* commands.
        if self.distribution.has_pure_modules():
            build_py = self.get_finalized_command('build_py')
            self.filelist.extend(build_py.get_source_files())
        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            self.filelist.extend(build_ext.get_source_files())
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.filelist.extend(build_clib.get_source_files())
        if self.distribution.has_scripts():
            build_scripts = self.get_finalized_command('build_scripts')
            self.filelist.extend(build_scripts.get_source_files())
    # add_defaults ()
    def read_template (self):
        """Read and parse manifest template file named by self.template.
        (usually "MANIFEST.in") The parsing and processing is done by
        'self.filelist', which updates itself accordingly.
        """
        log.info("reading manifest template '%s'", self.template)
        template = TextFile(self.template,
                            strip_comments=1,
                            skip_blanks=1,
                            join_lines=1,
                            lstrip_ws=1,
                            rstrip_ws=1,
                            collapse_join=1)
        while 1:
            line = template.readline()
            if line is None:            # end of file
                break
            try:
                self.filelist.process_template_line(line)
            except DistutilsTemplateError, msg:
                # Bad template lines are reported but do not abort the run.
                self.warn("%s, line %d: %s" % (template.filename,
                                               template.current_line,
                                               msg))
    # read_template ()
    def prune_file_list (self):
        """Prune off branches that might slip into the file list as created
        by 'read_template()', but really don't belong there:
          * the build tree (typically "build")
          * the release tree itself (only an issue if we ran "sdist"
            previously with --keep-temp, or it aborted)
          * any RCS, CVS and .svn directories
        """
        build = self.get_finalized_command('build')
        base_dir = self.distribution.get_fullname()
        self.filelist.exclude_pattern(None, prefix=build.build_base)
        self.filelist.exclude_pattern(None, prefix=base_dir)
        self.filelist.exclude_pattern(r'/(RCS|CVS|\.svn)/.*', is_regex=1)
    def write_manifest (self):
        """Write the file list in 'self.filelist' (presumably as filled in
        by 'add_defaults()' and 'read_template()') to the manifest file
        named by 'self.manifest'.
        """
        self.execute(file_util.write_file,
                     (self.manifest, self.filelist.files),
                     "writing manifest file '%s'" % self.manifest)
    # write_manifest ()
    def read_manifest (self):
        """Read the manifest file (named by 'self.manifest') and use it to
        fill in 'self.filelist', the list of files to include in the source
        distribution.
        """
        log.info("reading manifest file '%s'", self.manifest)
        # NOTE(review): the file handle is never closed explicitly; this
        # relies on refcounting/garbage collection.
        manifest = open(self.manifest)
        while 1:
            line = manifest.readline()
            if line == '':              # end of file
                break
            if line[-1] == '\n':
                line = line[0:-1]
            self.filelist.append(line)
    # read_manifest ()
    def make_release_tree (self, base_dir, files):
        """Create the directory tree that will become the source
        distribution archive.  All directories implied by the filenames in
        'files' are created under 'base_dir', and then we hard link or copy
        (if hard linking is unavailable) those files into place.
        Essentially, this duplicates the developer's source tree, but in a
        directory named after the distribution, containing only the files
        to be distributed.
        """
        # Create all the directories under 'base_dir' necessary to
        # put 'files' there; the 'mkpath()' is just so we don't die
        # if the manifest happens to be empty.
        self.mkpath(base_dir)
        dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
        # And walk over the list of files, either making a hard link (if
        # os.link exists) to each one that doesn't already exist in its
        # corresponding location under 'base_dir', or copying each file
        # that's out-of-date in 'base_dir'.  (Usually, all files will be
        # out-of-date, because by default we blow away 'base_dir' when
        # we're done making the distribution archives.)
        if hasattr(os, 'link'):        # can make hard links on this system
            link = 'hard'
            msg = "making hard links in %s..." % base_dir
        else:                           # nope, have to copy
            link = None
            msg = "copying files to %s..." % base_dir
        if not files:
            log.warn("no files to distribute -- empty manifest?")
        else:
            log.info(msg)
        for file in files:
            if not os.path.isfile(file):
                log.warn("'%s' not a regular file -- skipping" % file)
            else:
                dest = os.path.join(base_dir, file)
                self.copy_file(file, dest, link=link)
        self.distribution.metadata.write_pkg_info(base_dir)
    # make_release_tree ()
    def make_distribution (self):
        """Create the source distribution(s).  First, we create the release
        tree with 'make_release_tree()'; then, we create all required
        archive files (according to 'self.formats') from the release tree.
        Finally, we clean up by blowing away the release tree (unless
        'self.keep_temp' is true).  The list of archive files created is
        stored so it can be retrieved later by 'get_archive_files()'.
        """
        # Don't warn about missing meta-data here -- should be (and is!)
        # done elsewhere.
        base_dir = self.distribution.get_fullname()
        base_name = os.path.join(self.dist_dir, base_dir)
        self.make_release_tree(base_dir, self.filelist.files)
        archive_files = []              # remember names of files we create
        for fmt in self.formats:
            file = self.make_archive(base_name, fmt, base_dir=base_dir)
            archive_files.append(file)
            self.distribution.dist_files.append(('sdist', '', file))
        self.archive_files = archive_files
        if not self.keep_temp:
            dir_util.remove_tree(base_dir, dry_run=self.dry_run)
    def get_archive_files (self):
        """Return the list of archive files created when the command
        was run, or None if the command hasn't run yet.
        """
        return self.archive_files
# class sdist
| mit |
michaelray/Iristyle-ChocolateyPackages | EthanBrown.SublimeText2.EditorPackages/tools/PackageCache/Markdown Preview/markdown/blockprocessors.py | 47 | 22371 | """
CORE MARKDOWN BLOCKPARSER
===========================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProssors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import re
from . import util
from .blockparser import BlockParser
logger = logging.getLogger('MARKDOWN')
def build_block_parser(md_instance, **kwargs):
    """ Build the default block parser used by Markdown. """
    # NOTE(review): insertion order appears deliberate -- confirm against
    # BlockParser before reordering these registrations.
    default_processors = [
        ('empty', EmptyBlockProcessor),
        ('indent', ListIndentProcessor),
        ('code', CodeBlockProcessor),
        ('hashheader', HashHeaderProcessor),
        ('setextheader', SetextHeaderProcessor),
        ('hr', HRProcessor),
        ('olist', OListProcessor),
        ('ulist', UListProcessor),
        ('quote', BlockQuoteProcessor),
        ('paragraph', ParagraphProcessor),
    ]
    parser = BlockParser(md_instance)
    for name, processor_class in default_processors:
        parser.blockprocessors[name] = processor_class(parser)
    return parser
class BlockProcessor:
    """ Base class for block processors.

    Each subclass handles one kind of Markdown block and must override
    ``test`` and ``run``.  The parser asks ``test`` whether a block of
    text belongs to this processor; when it returns True, the parser
    calls ``run`` to do the actual work.
    """

    def __init__(self, parser):
        self.parser = parser
        self.tab_length = parser.markdown.tab_length

    def lastChild(self, parent):
        """ Return the last child of an etree element, or None if empty. """
        if len(parent):
            return parent[-1]
        return None

    def detab(self, text):
        """ Remove one tab stop from the front of each line of ``text``.

        Returns a ``(detabbed, remainder)`` pair: the leading run of
        indented (or blank) lines with one indent stripped, followed by
        the untouched rest of the text.
        """
        indent = ' ' * self.tab_length
        all_lines = text.split('\n')
        stripped = []
        for line in all_lines:
            if line.startswith(indent):
                stripped.append(line[self.tab_length:])
            elif not line.strip():
                # Blank (or whitespace-only) lines stay in, normalized.
                stripped.append('')
            else:
                # First non-indented, non-blank line ends the run.
                break
        return '\n'.join(stripped), '\n'.join(all_lines[len(stripped):])

    def looseDetab(self, text, level=1):
        """ Remove ``level`` tab stops from lines, leaving dedented lines alone. """
        width = self.tab_length * level
        indent = ' ' * width
        dedented = [line[width:] if line.startswith(indent) else line
                    for line in text.split('\n')]
        return '\n'.join(dedented)

    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        Called by the parser on each block of text to decide whether this
        processor should handle it; must return True or False.  The check
        may be as simple as ``block.startswith(...)`` or a regex, and may
        consult ``parent`` (the prospective parent etree element), since
        the block type can depend on context (e.g. inside a list).

        Keywords:

        * ``parent``: A etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass

    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        Parse the block and append the result to the tree.  Both
        ``parent`` and ``blocks`` are edited in place -- there is no
        mechanism to return replacements.  Implementations should add
        SubElements or text to ``parent`` and ``pop``/``insert`` entries
        on ``blocks``.

        Keywords:

        * ``parent``: A etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:

        * a list item

            process this part

            or this part

    """
    # Tags treated as list items / list containers when walking the tree.
    ITEM_TYPES = ['li']
    LIST_TYPES = ['ul', 'ol']
    def __init__(self, *args):
        BlockProcessor.__init__(self, *args)
        # Matches one or more leading "tab stops" (runs of tab_length spaces).
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)'% self.tab_length)
    def test(self, parent, block):
        # Indented block, not already being detabbed, whose parent is a
        # list item or whose parent's last child is a list -- i.e. this
        # block is content belonging to a list item.
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or \
                (len(parent) and parent[-1] and \
                    (parent[-1].tag in self.LIST_TYPES)
                )
            )
    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)
        # Guard against re-entering this processor while parsing the
        # detabbed content recursively (see test() above).
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a 'ul' or 'ol' child list
            # with a member.  If that is the case, then that should be the
            # parent.  This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see OListProcessor
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a li. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                # The p must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sublist.
                p = util.etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()
    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = util.etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])
    def get_level(self, parent, block):
        """ Get level of indent based on list level. """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            # NOTE: true division (module has `from __future__ import
            # division`), so this may be a float -- fine for the `>`
            # comparison in the loop below.
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
class CodeBlockProcessor(BlockProcessor):
    """ Process code blocks (lines indented by at least one tab stop). """

    def test(self, parent, block):
        # Any block starting with a full indent is (part of) a code block.
        return block.startswith(' ' * self.tab_length)

    def run(self, parent, blocks):
        previous = self.lastChild(parent)
        block = blocks.pop(0)
        leftover = ''
        continues_code = (previous is not None and previous.tag == "pre"
                          and len(previous) and previous[0].tag == "code")
        if continues_code:
            # The previous block was a code block.  Blank lines do not end
            # code blocks, so append this block's detabbed text to it,
            # restoring the linebreak lost when the source was split.
            code = previous[0]
            block, leftover = self.detab(block)
            code.text = util.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
        else:
            # Start a fresh <pre><code> element pair for this block.
            pre = util.etree.SubElement(parent, 'pre')
            code = util.etree.SubElement(pre, 'code')
            block, leftover = self.detab(block)
            code.text = util.AtomicString('%s\n' % block.rstrip())
        if leftover:
            # Unindented trailing lines are not code: push them back as the
            # next block so they get processed normally.
            blocks.insert(0, leftover)
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes (lines starting with ``>``). """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        match = self.RE.search(block)
        if match:
            # Anything before the first quoted line is an ordinary block;
            # recurse so it gets parsed first.
            self.parser.parseBlocks(parent, [block[:match.start()]])
            # Strip the leading ``> `` marker from every quoted line.
            cleaned = [self.clean(line)
                       for line in block[match.start():].split('\n')]
            block = '\n'.join(cleaned)
        sibling = self.lastChild(parent)
        if sibling and sibling.tag == "blockquote":
            # Continue the blockquote started by the previous block.
            quote = sibling
        else:
            # Open a fresh blockquote element.
            quote = util.etree.SubElement(parent, 'blockquote')
        # Parse the cleaned text with the quote as parent; the state flag
        # makes blockquotes embedded in lists wrap their content in <p>.
        self.parser.state.set('blockquote')
        self.parser.parseChunk(quote, block)
        self.parser.state.reset()

    def clean(self, line):
        """ Remove the leading ``>`` marker from a single line. """
        match = self.RE.match(line)
        if line.strip() == ">":
            return ""
        if match:
            return match.group(2)
        return line
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks (``1. item`` style). """

    TAG = 'ol'
    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
    # Detect items on secondary lines. They can be of either list type.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
    # Detect indented (nested) items of either type.
    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')
    # Extracts the integer an ordered list starts with. Hoisted out of
    # ``get_items`` (it was re-compiled for every list) and made a raw
    # string so ``\d`` is not parsed as an invalid escape sequence.
    INTEGER_RE = re.compile(r'(\d+)')
    # The integer (as a string) with which the list starts (default '1').
    # E.g. for a list initialized as ``3. Item`` the ``ol`` tag gets a
    # ``start="3"`` attribute.
    STARTSWITH = '1'
    # List of allowed sibling tags.
    SIBLING_TAGS = ['ol', 'ul']

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)
        if sibling and sibling.tag in self.SIBLING_TAGS:
            # Previous block was a list item, so set that as parent.
            lst = sibling
            # Make sure previous item is in a p - if the item has text,
            # then it isn't in a p.
            if lst[-1].text:
                # Since it's possible there are other children for this
                # sibling, we can't just SubElement the p; it must be
                # inserted as the first child.
                p = util.etree.Element('p')
                p.text = lst[-1].text
                lst[-1].text = ''
                lst[-1].insert(0, p)
            # If the last item has a tail, the tail needs to be put in a p;
            # likely only when a header is not followed by a blank line.
            lch = self.lastChild(lst[-1])
            if lch is not None and lch.tail:
                p = util.etree.SubElement(lst[-1], 'p')
                p.text = lch.tail.lstrip()
                lch.tail = ''
            # Parse first block differently as it gets wrapped in a p.
            li = util.etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        elif parent.tag in ['ol', 'ul']:
            # This catches the edge case of a multi-item indented list
            # whose first item is in a blank parent-list item:
            # * * subitem1
            #     * subitem2
            # see also ListIndentProcessor
            lst = parent
        else:
            # This is a new list so create parent with appropriate tag.
            lst = util.etree.SubElement(parent, self.TAG)
            # Check if a custom start integer is set.
            if not self.parser.markdown.lazy_ol and self.STARTSWITH != '1':
                lst.attrib['start'] = self.STARTSWITH
        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' ' * self.tab_length):
                # Item is indented. Parse with last item as parent.
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent.
                li = util.etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into its individual list items. """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new list item.
                # Check first item for the start index.
                if not items and self.TAG == 'ol':
                    # Detect the integer value of the first list item.
                    self.STARTSWITH = self.INTEGER_RE.match(m.group(1)).group()
                # Append to the list.
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' ' * self.tab_length):
                    # Previous item was indented. Append to that item.
                    items[-1] = '%s\n%s' % (items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '%s\n%s' % (items[-1], line)
        return items
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """
    TAG = 'ul'
    # Detect an item: a ``*``, ``+`` or ``-`` bullet followed by spaces.
    # ``group(1)`` contains the item's content; child/indent handling is
    # inherited from OListProcessor.
    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
    """ Process hash headers (``# H1`` through ``###### H6``). """

    # Detect a header at start of any line in block. Optional closing
    # hashes are consumed but not captured.
    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # The header was not the first line of the block; the
                # lines before it must be parsed first, so recurse.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE; the number of
            # hashes determines the header level.
            h = util.etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # This should never happen, but just in case...
            # (``warning`` replaces the deprecated ``warn`` alias.)
            logger.warning("We've got a problem header: %r" % block)
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style headers (text underlined with ``=`` or ``-``). """

    # A setext header must be the first two lines of a block.
    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # An ``=`` underline means <h1>; a ``-`` underline means <h2>.
        level = 1 if lines[1].startswith('=') else 2
        header = util.etree.SubElement(parent, 'h%d' % level)
        header.text = lines[0].strip()
        if len(lines) > 2:
            # Anything below the underline is a new block; queue it up
            # for later parsing.
            blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
    """ Process horizontal rules (``---``, ``___`` or ``***`` lines). """

    RE = r'^[ ]{0,3}((-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,})[ ]*'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent, block):
        match = self.SEARCH_RE.search(block)
        # Python regexes have no atomic grouping, so simulate it here for
        # performance: the pattern matches only the rule itself, then we
        # require the match to end at end-of-block or before a newline.
        if match and (match.end() == len(block) or block[match.end()] == '\n'):
            # Stash the match on the instance; ``run`` reuses it.
            self.match = match
            return True
        return False

    def run(self, parent, blocks):
        block = blocks.pop(0)
        # Lines above the rule are parsed first, recursively.
        prelines = block[:self.match.start()].rstrip('\n')
        if prelines:
            self.parser.parseBlocks(parent, [prelines])
        # Create the hr element itself.
        util.etree.SubElement(parent, 'hr')
        # Lines below the rule go back on the queue for later parsing.
        postlines = block[self.match.end():].lstrip('\n')
        if postlines:
            blocks.insert(0, postlines)
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that are empty or start with an empty line. """

    def test(self, parent, block):
        return not block or block.startswith('\n')

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if block:
            # Block starts with (but is not only) a blank line: consume
            # just that one line and requeue whatever follows it.
            filler = '\n'
            remainder = block[1:]
            if remainder:
                blocks.insert(0, remainder)
        else:
            # Entirely empty block: represents a full blank separation.
            filler = '\n\n'
        sibling = self.lastChild(parent)
        if (sibling is not None and sibling.tag == 'pre'
                and len(sibling) and sibling[0].tag == 'code'):
            # The previous block is a code block; append the blank
            # line(s) to it so whitespace is preserved verbatim.
            sibling[0].text = util.AtomicString(
                '%s%s' % (sibling[0].text, filler))
class ParagraphProcessor(BlockProcessor):
    """ Process paragraph blocks (the fallback processor). """

    def test(self, parent, block):
        # Always matches; this processor must be registered last.
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if not block.strip():
            # Blank block: throw it away.
            return
        if self.parser.state.isstate('list'):
            # The parent is a tight-list, so no <p> wrapper is created;
            # attach the text directly. A previous child usually only
            # exists when a header wasn't followed by a blank line, e.g.:
            #
            #     * # Header
            #     Line 2 of list item - not part of header.
            sibling = self.lastChild(parent)
            if sibling is not None:
                # Insert after the sibling by extending its tail text.
                if sibling.tail:
                    sibling.tail = '%s\n%s' % (sibling.tail, block)
                else:
                    sibling.tail = '\n%s' % block
            elif parent.text:
                # Append to the parent's existing text.
                parent.text = '%s\n%s' % (parent.text, block)
            else:
                parent.text = block.lstrip()
        else:
            # Normal case: wrap the block in a <p> element.
            p = util.etree.SubElement(parent, 'p')
            p.text = block.lstrip()
| mit |
zvoase/django-relax | relax/couchdb/replicate.py | 1 | 4148 | # -*- coding: utf-8 -*-
import datetime
import logging
import re
import urlparse
from couchdb import client
from relax import DEFAULT_FORMATTER, json, settings
from relax.couchdb import ensure_specifier_exists, specifier_to_db, shortcuts
class ReplicationError(Exception):
    """Raised when the CouchDB server rejects a replication request.

    Unpacks the ``ServerError.args`` structure into a response status,
    reason phrase and JSON-decoded body for callers to inspect.
    """
    def __init__(self, server_error_args):
        Exception.__init__(self, 'Error in replication session.')
        error = server_error_args[0]
        self.response_status = error[0]
        self.response_reason = error[1][0]
        self.response_body = json.loads(error[1][1])
class ReplicationFailure(Exception):
    """Raised when a replication session runs but does not succeed."""
    def __init__(self, response_headers, result):
        Exception.__init__(self, 'Replication failed.')
        # Keep the raw server response around for callers to inspect.
        self.response_headers = response_headers
        self.result = result
def replicate_existing(source_db, target_db):
    """Replicate an existing database to another existing database.

    Both arguments are database specifiers (resolved via
    ``specifier_to_db``). Returns the replication history record on
    success. Raises ``ReplicationError`` if the server rejects the
    request and ``ReplicationFailure`` if the session itself fails.
    """
    # Get the server from which to manage the replication.
    server = shortcuts.get_server()
    logger = logging.getLogger('relax.couchdb.replicate')
    logger.debug('POST ' + urlparse.urljoin(server.resource.uri, '/_replicate'))
    source, target = specifier_to_db(source_db), specifier_to_db(target_db)
    logger.debug('Source DB: %s' % (source,))
    logger.debug('Target DB: %s' % (target,))
    try:
        resp_headers, resp_body = server.resource.post(path='/_replicate',
            content=json.dumps({'source': source, 'target': target}))
    except client.ServerError as exc:
        # Bug fix: the module is imported as ``from couchdb import
        # client``, so the previous ``couchdb.client.ServerError`` raised
        # a NameError here instead of catching the server error.
        logger.error('Replication failed.')
        raise ReplicationError(exc.args)
    result = resp_body['history'][0]
    # Session-level fields are exposed on the result in both outcomes.
    result['ok'] = resp_body['ok']
    result['session_id'] = resp_body['session_id']
    result['source_last_seq'] = resp_body['source_last_seq']
    if resp_body['ok']:
        logger.info('Replication %s... successful!' % (
            resp_body['session_id'][:6],))
        logger.info('Replication started: ' + result['start_time'])
        logger.info('Replication finished: ' + result['end_time'])
        # Convert the RFC-1123 timestamps so a duration can be computed.
        result['start_time'] = datetime.datetime.strptime(result['start_time'],
            '%a, %d %b %Y %H:%M:%S GMT')
        result['end_time'] = datetime.datetime.strptime(result['end_time'],
            '%a, %d %b %Y %H:%M:%S GMT')
        timedelta = result['end_time'] - result['start_time']
        if timedelta.days:
            logger.info('Replication took %d days and %.2f seconds.' % (
                timedelta.days,
                timedelta.seconds + (timedelta.microseconds * (1e-6))))
        else:
            logger.info('Replication took %.2f seconds.' % (
                timedelta.seconds + (timedelta.microseconds * (1e-6))))
        # Info-log the number of docs read/written and checked/found.
        if result['docs_read'] == 1:
            docs_read = '1 document read'
        else:
            docs_read = '%d documents read' % (result['docs_read'],)
        if result['docs_written'] == 1:
            docs_written = '1 document written'
        else:
            docs_written = '%d documents written' % (result['docs_written'],)
        if result['missing_checked'] == 1:
            missing_checked = 'Checked for 1 missing document, found %d.' % (
                result['missing_found'],)
        else:
            missing_checked = 'Checked for %d missing documents, found %d.' % (
                result['missing_checked'], result['missing_found'],)
        # Consistency fix: use the module logger, not the root logger.
        logger.info('%s, %s' % (docs_read, docs_written))
        logger.info(missing_checked)
        return result
    else:
        logger.error('Replication %s... failed.' % (
            resp_body['session_id'][:6],))
        raise ReplicationFailure(resp_headers, result)
def replicate(source_spec, target_spec):
    """Replicate one existing database to another (optionally existing) DB.

    Creates the target database first if it does not exist, then
    delegates to ``replicate_existing``. (Also removes a stray dump
    artifact that had been fused onto the return line.)
    """
    ensure_specifier_exists(target_spec)
    return replicate_existing(source_spec, target_spec)
jmerkow/VTK | ThirdParty/Twisted/twisted/plugins/cred_unix.py | 63 | 3794 | # -*- test-case-name: twisted.test.test_strcred -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Cred plugin for UNIX user accounts.
"""
from zope.interface import implements
from twisted import plugin
from twisted.cred.strcred import ICheckerFactory
from twisted.cred.checkers import ICredentialsChecker
from twisted.cred.credentials import IUsernamePassword
from twisted.cred.error import UnauthorizedLogin
from twisted.internet import defer
def verifyCryptedPassword(crypted, pw):
    """
    Check that the plaintext password *pw* matches *crypted*, a crypt(3)
    hash taken from /etc/passwd or /etc/shadow.

    Raises NotImplementedError on platforms without the crypt module
    (e.g. Windows). NOTE(review): the stdlib ``crypt`` module is
    deprecated and removed in Python 3.13 — confirm a replacement before
    porting.
    """
    # Check platform support first, so crypt-less platforms always get
    # NotImplementedError (previously the salt was computed first and an
    # empty hash could raise IndexError instead).
    try:
        import crypt
    except ImportError:
        raise NotImplementedError("cred_unix not supported on this platform")
    if crypted[0] == '$':  # md5_crypt encrypted
        # Modular-crypt format: rebuild the ``$1$<salt>`` prefix.
        salt = '$1$' + crypted.split('$')[2]
    else:
        # Traditional DES crypt: the first two characters are the salt.
        salt = crypted[:2]
    return crypt.crypt(pw, salt) == crypted
class UNIXChecker(object):
    """
    A credentials checker for a UNIX server. This will check that
    an authenticating username/password is a valid user on the system.

    Does not work on Windows.

    Right now this supports Python's pwd and spwd modules, if they are
    installed. It does not support PAM.
    """
    implements(ICredentialsChecker)
    credentialInterfaces = (IUsernamePassword,)

    def checkPwd(self, pwd, username, password):
        """
        Check the credentials against the passwd database (the *pwd*
        module, passed in as an argument). Returns a fired Deferred for
        a definite answer, or None to let checkSpwd take over.
        """
        try:
            cryptedPass = pwd.getpwnam(username)[1]
        except KeyError:
            # No such user.
            return defer.fail(UnauthorizedLogin())
        else:
            if cryptedPass in ('*', 'x'):
                # Allow checkSpwd to take over
                return None
            elif verifyCryptedPassword(cryptedPass, password):
                return defer.succeed(username)
            # NOTE(review): a wrong password falls through to an implicit
            # None return, so requestAvatarId moves on to checkSpwd.

    def checkSpwd(self, spwd, username, password):
        """
        Check the credentials against the shadow database (the *spwd*
        module). Same return convention as checkPwd.
        """
        try:
            cryptedPass = spwd.getspnam(username)[1]
        except KeyError:
            return defer.fail(UnauthorizedLogin())
        else:
            if verifyCryptedPassword(cryptedPass, password):
                return defer.succeed(username)

    def requestAvatarId(self, credentials):
        """
        Try each available password database in turn; fail with
        UnauthorizedLogin if none gives a definite positive answer.
        """
        username, password = credentials.username, credentials.password
        try:
            import pwd
        except ImportError:
            pwd = None
        if pwd is not None:
            checked = self.checkPwd(pwd, username, password)
            if checked is not None:
                return checked
        try:
            import spwd
        except ImportError:
            spwd = None
        if spwd is not None:
            checked = self.checkSpwd(spwd, username, password)
            if checked is not None:
                return checked
        # TODO: check_pam?
        # TODO: check_shadow?
        return defer.fail(UnauthorizedLogin())
unixCheckerFactoryHelp = """
This checker will attempt to use every resource available to
authenticate against the list of users on the local UNIX system.
(This does not support Windows servers for very obvious reasons.)
Right now, this includes support for:
* Python's pwd module (which checks /etc/passwd)
* Python's spwd module (which checks /etc/shadow)
Future versions may include support for PAM authentication.
"""
class UNIXCheckerFactory(object):
    """
    A factory for L{UNIXChecker}.
    """
    implements(ICheckerFactory, plugin.IPlugin)
    # Name by which this checker is selected (see twisted.cred.strcred).
    authType = 'unix'
    authHelp = unixCheckerFactoryHelp
    argStringFormat = 'No argstring required.'
    credentialInterfaces = UNIXChecker.credentialInterfaces

    def generateChecker(self, argstring):
        """
        This checker factory ignores the argument string. Everything
        needed to generate a user database is pulled out of the local
        UNIX environment.
        """
        return UNIXChecker()
theUnixCheckerFactory = UNIXCheckerFactory()
| bsd-3-clause |
rackerlabs/python-proboscis | tests/unit/proboscis_example.py | 4 | 1726 | # Copyright (c) 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Example module loaded by proboscis_test."""
import unittest
from proboscis import test
_data_exists = False
_tests_run = [ False, False, False ]
@test(groups=["integration"], depends_on_groups=["init"])
class RandomTestZero(unittest.TestCase):
    """Integration test; must run after the "init" group."""
    def test_something(self):
        # Bug fix: ``self.assertEquals(_data_exists)`` passed a single
        # argument to the two-argument assertEquals, raising TypeError.
        # The intent (cf. RandomTestOne) is a truthiness check.
        self.assertTrue(_data_exists)
        _tests_run[0] = True
@test(depends_on_groups=["integration"])
class Destroy(unittest.TestCase):
    # Runs only after every test in the "integration" group has finished.
    def test_destroy(self):
        assert _data_exists
@test(groups=["integration"], depends_on_groups=["init"],
      depends_on_classes=[RandomTestZero])
class RandomTestOne(unittest.TestCase):
    # Ordered after RandomTestZero via depends_on_classes.
    def test_something(self):
        assert _data_exists
        _tests_run[1] = True
@test(groups=["integration"], depends_on_groups=["init"])
class RandomTestTwo(unittest.TestCase):
    """Integration test; must run after the "init" group."""
    def test_something(self):
        # Bug fix: ``self.assertEquals(_data_exists)`` passed a single
        # argument to the two-argument assertEquals, raising TypeError.
        self.assertTrue(_data_exists)
        _tests_run[2] = True
@test(groups=["init"])
class StartUp(unittest.TestCase):
    """Simulated set-up step; runs first and flags that data exists."""
    def test_connect_to_db(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(10, 10)
        global _data_exists
        _data_exists = True
| apache-2.0 |
samueldotj/TeeRISC-Simulator | src/arch/sparc/SparcTLB.py | 69 | 1818 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from m5.SimObject import SimObject
from m5.params import *
from BaseTLB import BaseTLB
class SparcTLB(BaseTLB):
    """SimObject parameter declaration for the SPARC TLB model."""
    type = 'SparcTLB'
    # C++ class and header implementing this SimObject.
    cxx_class = 'SparcISA::TLB'
    cxx_header = 'arch/sparc/tlb.hh'
    # Number of TLB entries.
    size = Param.Int(64, "TLB size")
| bsd-3-clause |
x303597316/hue | desktop/core/src/desktop/management/commands/runcherrypyserver.py | 14 | 3776 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a thirdparty project
import logging
import pprint
import sys
from django.core.management.base import BaseCommand
from desktop import conf
from desktop.lib.daemon_utils import drop_privileges_if_necessary
from django.utils.translation import ugettext as _
CPSERVER_HELP = r"""
Run Hue using the CherryPy WSGI server.
"""
CPSERVER_OPTIONS = {
'host': conf.HTTP_HOST.get(),
'port': conf.HTTP_PORT.get(),
'server_name': 'localhost',
'threads': conf.CHERRYPY_SERVER_THREADS.get(),
'daemonize': False, # supervisor does this for us
'workdir': None,
'pidfile': None,
'server_user': conf.SERVER_USER.get(),
'server_group': conf.SERVER_GROUP.get(),
'ssl_certificate': conf.SSL_CERTIFICATE.get(),
'ssl_private_key': conf.SSL_PRIVATE_KEY.get(),
'ssl_cipher_list': conf.SSL_CIPHER_LIST.get()
}
class Command(BaseCommand):
    """Django management command that runs Hue under CherryPy."""
    help = _("CherryPy Server for Desktop.")
    args = ""

    def handle(self, *args, **options):
        """Validate config, activate the locale and start the server."""
        from django.conf import settings
        from django.utils import translation

        if not conf.ENABLE_SERVER.get():
            logging.info("Hue is configured to not start its own web server.")
            sys.exit(0)

        # Activate the current language, because it won't get activated later.
        try:
            translation.activate(settings.LANGUAGE_CODE)
        except AttributeError:
            # Settings may not define LANGUAGE_CODE; proceed without it.
            pass

        runcpserver(args)

    def usage(self, subcommand):
        return CPSERVER_HELP
def start_server(options):
    """
    Start CherryPy server

    *options* is a dict shaped like CPSERVER_OPTIONS (host, port,
    threads, SSL settings, privilege-drop user/group).
    """
    from desktop.lib.wsgiserver import CherryPyWSGIServer as Server
    from django.core.handlers.wsgi import WSGIHandler

    # Translogger wraps a WSGI app with Apache-style combined logging.
    server = Server(
        (options['host'], int(options['port'])),
        WSGIHandler(),
        int(options['threads']),
        options['server_name']
    )
    # SSL is enabled only when both a certificate and key are configured.
    if options['ssl_certificate'] and options['ssl_private_key']:
        server.ssl_certificate = options['ssl_certificate']
        server.ssl_private_key = options['ssl_private_key']
        server.ssl_cipher_list = options['ssl_cipher_list']
        ssl_password = conf.get_ssl_password()
        if ssl_password:
            # Callback that supplies the key passphrase to the SSL layer.
            server.ssl_password_cb = lambda *unused: ssl_password
    try:
        server.bind_server()
        # Shed elevated privileges (if any) after binding the port.
        drop_privileges_if_necessary(options)
        server.listen_and_loop()
    except KeyboardInterrupt:
        server.stop()
def runcpserver(argset=[], **kwargs):
    """Parse ``key=value`` style arguments, merge with defaults, start.

    NOTE(review): the mutable default ``argset=[]`` is shared across
    calls; harmless here since it is never mutated, but worth fixing.
    """
    # Get the options
    options = CPSERVER_OPTIONS.copy()
    options.update(kwargs)
    for x in argset:
        if "=" in x:
            k, v = x.split('=', 1)
        else:
            # Bare flags (e.g. ``help``) become boolean options.
            k, v = x, True
        options[k.lower()] = v

    if "help" in options:
        print CPSERVER_HELP
        return

    # Start the webserver
    print _('starting server with options:')
    pprint.pprint(options)
    start_server(options)


if __name__ == '__main__':
    runcpserver(sys.argv[1:])
| apache-2.0 |
vsvankhede/foursquared.eclair | util/gen_class.py | 262 | 3173 | #!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
HEADER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.types;
%(imports)s
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
*/
public class %(type_name)s implements %(interfaces)s {
"""
GETTER = """\
public %(attribute_type)s get%(camel_name)s() {
return %(field_name)s;
}
"""
SETTER = """\
public void set%(camel_name)s(%(attribute_type)s %(attribute_name)s) {
%(field_name)s = %(attribute_name)s;
}
"""
BOOLEAN_GETTER = """\
public %(attribute_type)s %(attribute_name)s() {
return %(field_name)s;
}
"""
def main():
    """Read the schema file named on the command line; emit a Java class."""
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        sys.argv[1])
    GenerateClass(type_name, attributes)
def GenerateClass(type_name, attributes):
    """Print the complete Java class for *type_name* to stdout."""
    lines = []
    # Member field declarations, one per attribute.
    for attribute_name in sorted(attributes):
        typ, children = attributes[attribute_name]
        lines.extend(Field(attribute_name, typ).split('\n'))
    lines.append('')
    # Empty public constructor.
    lines.extend(Constructor(type_name).split('\n'))
    lines.append('')
    # getters and setters
    for attribute_name in sorted(attributes):
        attribute_type, children = attributes[attribute_name]
        lines.extend(Accessors(attribute_name, attribute_type).split('\n'))
    print Header(type_name)
    #print '    ' + '\n    '.join(lines)
    # Indent every non-blank body line by four spaces.
    for line in lines:
        if not line:
            print line
        else:
            print '    ' + line
    print Footer()
def AccessorReplacements(attribute_name, attribute_type):
    """Build the substitution dict used by the field/accessor templates.

    E.g. ``'venue_name'`` yields camel name ``'VenueName'``, local name
    ``'venueName'`` and field name ``'mVenueName'``.
    """
    words = attribute_name.split('_')
    camel_name = ''.join(word.capitalize() for word in words)
    # lowerCamelCase local variable name.
    local_name = camel_name[0].lower() + camel_name[1:]
    return {
        'attribute_name': local_name,
        'camel_name': camel_name,
        'field_name': 'm' + camel_name,
        'attribute_type': attribute_type,
    }
def Header(type_name):
    """Return the file header: copyright, package, imports, class open."""
    interfaces = common.INTERFACES.get(type_name, common.DEFAULT_INTERFACES)
    import_names = common.CLASS_IMPORTS.get(type_name,
                                            common.DEFAULT_CLASS_IMPORTS)
    if import_names:
        # Bug fix: this previously joined the not-yet-defined name
        # ``imports`` (a NameError whenever a class had imports); the
        # import names are what must be joined.
        imports = ';\n'.join(import_names) + ';'
    else:
        imports = ''
    return HEADER % {'type_name': type_name,
                     'interfaces': ', '.join(interfaces),
                     'imports': imports,
                     'timestamp': datetime.datetime.now()}
def Field(attribute_name, attribute_type):
    """Return the private member-field declaration for one attribute."""
    repl = AccessorReplacements(attribute_name, attribute_type)
    return 'private %(attribute_type)s %(field_name)s;' % repl
def Constructor(type_name):
    """Return an empty public constructor declaration for *type_name*."""
    return 'public %s() {\n}' % type_name
def Accessors(name, attribute_type):
    """Return the getter/setter pair for one attribute.

    Boolean attributes use an ``attributeName()``-style getter instead
    of ``getAttributeName()``.
    """
    repl = AccessorReplacements(name, attribute_type)
    if attribute_type == common.BOOLEAN:
        getter = BOOLEAN_GETTER
    else:
        getter = GETTER
    return '%s\n%s' % (getter % repl, SETTER % repl)
def Footer():
    """Return the closing brace of the generated class."""
    return '}'
if __name__ == '__main__':
main()
| apache-2.0 |
Teagan42/home-assistant | homeassistant/components/vultr/sensor.py | 6 | 3251 | """Support for monitoring the state of Vultr Subscriptions."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_MONITORED_CONDITIONS, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from . import (
ATTR_CURRENT_BANDWIDTH_USED,
ATTR_PENDING_CHARGES,
CONF_SUBSCRIPTION,
DATA_VULTR,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Vultr {} {}"
MONITORED_CONDITIONS = {
ATTR_CURRENT_BANDWIDTH_USED: [
"Current Bandwidth Used",
"GB",
"mdi:chart-histogram",
],
ATTR_PENDING_CHARGES: ["Pending Charges", "US$", "mdi:currency-usd"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SUBSCRIPTION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(MONITORED_CONDITIONS)
): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Vultr subscription (server) sensor.

    Creates one VultrSensor per monitored condition; bails out (with an
    error log) if the configured subscription is unknown.
    """
    vultr = hass.data[DATA_VULTR]

    subscription = config.get(CONF_SUBSCRIPTION)
    name = config.get(CONF_NAME)
    monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)

    if subscription not in vultr.data:
        _LOGGER.error("Subscription %s not found", subscription)
        return

    # One sensor entity per monitored condition.
    sensors = [
        VultrSensor(vultr, subscription, condition, name)
        for condition in monitored_conditions
    ]
    add_entities(sensors, True)
class VultrSensor(Entity):
    """Representation of a Vultr subscription sensor."""

    def __init__(self, vultr, subscription, condition, name):
        """Initialize a new Vultr sensor."""
        self._vultr = vultr            # shared Vultr data object
        self._condition = condition    # key into the subscription data
        self._name = name              # name template with {} placeholders
        self.subscription = subscription
        self.data = None               # filled in on first update()
        condition_info = MONITORED_CONDITIONS[condition]
        self._condition_name = condition_info[0]
        self._units = condition_info[1]
        self._icon = condition_info[2]

    @property
    def name(self):
        """Return the name of the sensor."""
        try:
            # A template with a single placeholder formats with just the
            # condition name.
            return self._name.format(self._condition_name)
        except IndexError:
            # The default template has two positional placeholders
            # ("Vultr {} {}"); fill them with the subscription label and
            # the condition name.
            try:
                return self._name.format(self.data["label"], self._condition_name)
            except (KeyError, TypeError):
                # Data not fetched yet (None) or missing "label" key.
                return self._name

    @property
    def icon(self):
        """Return the icon used in the frontend if any."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement to present the value in."""
        return self._units

    @property
    def state(self):
        """Return the value of this given sensor type."""
        try:
            # Numeric readings are rounded to two decimal places.
            return round(float(self.data.get(self._condition)), 2)
        except (TypeError, ValueError):
            # Non-numeric (or missing) values are passed through as-is.
            return self.data.get(self._condition)

    def update(self):
        """Update state of sensor."""
        self._vultr.update()
        self.data = self._vultr.data[self.subscription]
| apache-2.0 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/psrc/zone/trip_weighted_average_generalized_cost_hbw_from_home_am_drive_alone.py | 2 | 3979 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from psrc.zone.abstract_trip_weighted_average_variable_from_home import Abstract_Trip_Weighted_Average_Variable_From_Home
class trip_weighted_average_generalized_cost_hbw_from_home_am_drive_alone(Abstract_Trip_Weighted_Average_Variable_From_Home):
    """ Trip weighted average generalized cost from home to any workplace for
    home-based-work am trips by auto.
    """
    def __init__(self):
        # Weight the zone-to-zone drive-alone cost attribute by AM-peak
        # drive-alone vehicle trips; the abstract base does the averaging.
        Abstract_Trip_Weighted_Average_Variable_From_Home.__init__(self, time_attribute_name = "single_vehicle_to_work_travel_cost",
                                                                   trips_attribute_name = "am_pk_period_drive_alone_vehicle_trips")
from numpy import array
from numpy import ma
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
    # Fully-qualified name of the variable under test.
    # NOTE(review): ``self.assert_`` below is a deprecated unittest alias
    # (removed in Python 3.12); confirm opus_unittest still provides it
    # before modernizing.
    variable_name = "psrc.zone.trip_weighted_average_generalized_cost_hbw_from_home_am_drive_alone"

    def test_my_inputs(self):
        # Two zones; every zone pair has a non-zero trip count.
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='zones',
            table_data={
                'zone_id': array([1, 2]),
            }
        )
        storage.write_table(
            table_name='travel_data',
            table_data={
                "from_zone_id": array([1,1,2,2]),
                'to_zone_id': array([1,2,1,2]),
                "single_vehicle_to_work_travel_cost":array([1.1, 2.2, 3.3, 4.4]),
                "am_pk_period_drive_alone_vehicle_trips":array([1.0, 2.0, 3.0, 4.0]),
            }
        )

        dataset_pool = DatasetPool(package_order=['urbansim'],
                                   storage=storage)

        zone = dataset_pool.get_dataset('zone')
        zone.compute_variables(self.variable_name,
                               dataset_pool=dataset_pool)
        values = zone.get_attribute(self.variable_name)

        # Expected: sum(cost * trips) / sum(trips), per origin zone.
        should_be = array([(1.1*1.0 +2.2*2.0)/(3.0),
                           (3.3*3.0 + 4.4*4.0)/(7.0)])

        self.assert_(ma.allclose(values, should_be, rtol=1e-7),
                     msg="Error in " + self.variable_name)

    def test_with_zero_denominator(self):
        # Zones 3 and 4 have zero outgoing trips; the variable should
        # carry forward the previous good value for them.
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='zones',
            table_data={
                'zone_id': array([1, 2, 3, 4]),
            }
        )
        storage.write_table(
            table_name='travel_data',
            table_data={
                "from_zone_id":array([1,2,2,3,4]),
                "to_zone_id":array([1,2,1,2,2]),
                "single_vehicle_to_work_travel_cost":array([1.1, 2.2, 3.3, 4.4, 5.5]),
                "am_pk_period_drive_alone_vehicle_trips":array([10.1, 20.0, 30.0, 0.0, 0.0]),
            }
        )

        dataset_pool = DatasetPool(package_order=['urbansim'],
                                   storage=storage)

        zone = dataset_pool.get_dataset('zone')
        zone.compute_variables(self.variable_name,
                               dataset_pool=dataset_pool)
        values = zone.get_attribute(self.variable_name)

        should_be = array([(1.1*10.1)/(10.1),
                           (2.2*20.0 + 3.3*30)/(20.0+30.0),
                           (2.2*20.0 + 3.3*30)/(20.0+30.0),# when denominator = 0, use prior good value
                           (2.2*20.0 + 3.3*30)/(20.0+30.0)])# when denominator = 0, use prior good value

        self.assert_(ma.allclose(values, should_be, rtol=1e-7),
                     msg="Error in " + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | gpl-2.0 |
citizennerd/VivaCity | semanticizer/models.py | 1 | 4582 | from django.db import models
from postdoc.models import *
from semanticizer.formats import *
from django.contrib.auth.models import User
class DataSetFormat(models.Model):
    """A named data-set format whose parsing is delegated to an adapter.
    ``module`` names the adapter looked up via ``get_adapter`` in
    ``DataSet.save`` below.
    """
    name = models.CharField(max_length=30)
    # Dotted adapter identifier; consumed by get_adapter() in DataSet.save.
    module = models.CharField(max_length=100)
    geographic = models.BooleanField()
    # Free-form text describing what configuration the adapter needs.
    configuration_requirements = models.TextField()
    is_api = models.BooleanField(default=False)
    time_explicit = models.BooleanField()
    time_implicit = models.BooleanField()
    def __str__(self):
        return self.name
class DSFAlias(models.Model):
    """Alternative (unique) name mapping onto a :class:`DataSetFormat`."""
    name = models.CharField(max_length=255, unique=True)
    dsf = models.ForeignKey(DataSetFormat)
    def __str__(self):
        return "%s => %s" % (self.name, self.dsf)
def create_dataset(structure):
    """Build an (unsaved) :class:`DataSet` from a parsed description.

    :param structure: mapping with at least a ``'url'`` key and a
        ``'format'`` key naming a :class:`DSFAlias` (may be empty).
    :return: the populated, not-yet-saved :class:`DataSet` instance.

    Bug fix: the original version computed ``file`` and ``format`` into
    locals and discarded them, and returned ``None`` — the function had
    no observable effect.  The values are now assigned to the instance
    and the instance is returned.
    """
    ds = DataSet()
    ds.file = structure['url']
    if structure['format'] != "" and DSFAlias.objects.filter(name = structure['format']).count() > 0:
        # DataSet.format is a FK to DataSetFormat, so resolve the alias
        # to its target format rather than assigning the alias itself.
        ds.format = DSFAlias.objects.get(name=structure['format']).dsf
    return ds
class DataSet(models.Model):
    """An uploaded/linked data file plus its format and refresh settings.

    Saving a data set re-derives its :class:`DataSetColumn` rows by asking
    the format's adapter to extract the column names from the file.
    """
    owner = models.ForeignKey(User)
    file = models.URLField()
    format = models.ForeignKey(DataSetFormat, null=True, blank=True)
    format_configuration = models.TextField(null=True, blank=True)
    refresh_period = models.TextField(blank = True, null=True)
    private = models.BooleanField(default=False)
    publication_aggregation = models.TextField()
    def __str__(self):
        return self.file
    def save(self, *args, **kwargs):
        # Bug fix: forward *args/**kwargs to Model.save() — the original
        # call dropped them, silently ignoring options such as using= or
        # force_insert= passed by callers.
        super(DataSet, self).save(*args, **kwargs)
        # NOTE(review): format is nullable (null=True above); if it is None
        # this raises AttributeError, matching the original behaviour —
        # confirm whether callers guarantee a format before save.
        model = self.format.module
        fmat = get_adapter(model)()
        # Rebuild the column cache from scratch for this data set.
        DataSetColumn.objects.filter(dataset = self).delete()
        for column in fmat.extract_columns(self.file):
            d = DataSetColumn()
            d.dataset = self
            d.name = column
            d.save()
class DataSetColumn(models.Model):
    """A single named column discovered in a :class:`DataSet`.
    Rows are (re)created by ``DataSet.save`` from the format adapter.
    """
    dataset = models.ForeignKey(DataSet, related_name="columns")
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
    class Meta:
        # A column name may appear at most once per data set.
        unique_together = ('dataset', 'name')
class Semantics(models.Model):
    """Links a :class:`DataSet` to the :class:`DataModel` it instantiates."""
    dataset = models.ForeignKey(DataSet, related_name="semantics")
    data_model = models.ForeignKey(DataModel)
    def __str__(self):
        return "%s => %s" % (self.dataset.file, self.data_model.name)
class SemanticsSpecification(models.Model):
    """Maps one data-set column onto a :class:`DataModelAttribute`,
    optionally via a path of intermediate attributes (``via``).
    """
    semantics = models.ForeignKey(Semantics, related_name="associations")
    attribute = models.ForeignKey(DataModelAttribute)
    via = models.ForeignKey('SemanticsSpecificationPath', blank=True, null=True, related_name="path")
    column = models.CharField(max_length=500,blank = True, null=True)
    data_transformation = models.CharField(max_length=200)
    def __str__(self):
        # Two renderings: with and without the optional attribute path.
        if self.via is not None:
            return "%s: %s => %s via %s" % (self.semantics.dataset.file, self.column, self.attribute.name, self.via)
        return "%s: %s => %s" % (self.semantics.dataset.file, self.column, self.attribute.name)
class SemanticsSpecificationPath(models.Model):
    """A singly-linked list of attributes forming a mapping path.
    Each node holds one attribute and an optional ``next`` node.
    """
    attribute = models.ForeignKey(DataModelAttribute)
    next = models.ForeignKey('SemanticsSpecificationPath',blank = True, null=True, related_name="previous")
    def __str__(self):
        # Walk the chain, collecting the attribute of every node.
        att = self.next
        li = [str(self.attribute)]
        while att is not None:
            li.append(str(att.attribute))
            att = att.next
        return str(",".join(li))
    @property
    def list_ids(self):
        # Same traversal as __str__, but collects attribute primary keys.
        att = self.next
        li = [self.attribute.id]
        while att is not None:
            li.append(att.attribute.id)
            att = att.next
        return li
class GeoSemanticsSpecification(models.Model):
    """Marks which column of a data set carries geographic information
    (x/y coordinate or an address to geocode).
    """
    semantics = models.ForeignKey(Semantics, related_name="geo_associations")
    column = models.CharField(max_length=500,blank = True, null=True)
    is_geo_x = models.BooleanField()
    is_geo_y = models.BooleanField()
    geocode_address = models.BooleanField()
    data_transformation = models.CharField(max_length=200)
class TimeSpanType(models.Model):
    """A named kind of time span, referenced by time specifications."""
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class TimeSemanticsSpecification(models.Model):
    """Describes where the time dimension of a data set comes from:
    either a fixed ``absolute_value`` or a ``column`` parsed with ``format``.
    """
    semantics = models.ForeignKey(Semantics, related_name="time_associations")
    absolute_value = models.DateTimeField(blank=True, null=True)
    column = models.CharField(max_length=500,blank = True, null=True)
    format = models.CharField(max_length=200)
    data_transformation = models.CharField(max_length=200)
    timespan_type = models.ForeignKey(TimeSpanType)
| mit |
arangodb/fuerte | 3rdParty/googletest/googletest/test/gtest_xml_test_utils.py | 364 | 8872 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """
  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:
    *  It has the same tag name as expected_node.
    *  It has the same set of attributes as expected_node, each with
       the same value as the corresponding attribute of expected_node.
       Exceptions are any attribute named "time", which needs only be
       convertible to a floating-point number and any attribute named
       "type_param" which only has to be non-empty.
    *  It has an equivalent set of child nodes (including elements and
       CDATA sections) as expected_node.  Note that we ignore the
       order of the children as they are not guaranteed to be in any
       particular order.
    """
    # CDATA leaf case: compare node values directly and stop recursing.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return
    # Element case: compare tag, attributes, then recurse into children.
    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)
    expected_attributes = expected_node.attributes
    actual_attributes = actual_node .attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
            actual_node.tagName, expected_attributes.keys(),
            actual_attributes.keys()))
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      # Lookup by name, not index: attribute order is not significant.
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          'expected attribute %s not found in element %s' %
          (expected_attr.name, actual_node.tagName))
      self.assertEquals(
          expected_attr.value, actual_attr.value,
          ' values of attribute %s in element %s differ: %s vs %s' %
          (expected_attr.name, actual_node.tagName,
           expected_attr.value, actual_attr.value))
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        'number of child elements differ in element ' + actual_node.tagName)
    # Children are matched by ID (see _GetChildren), so order is ignored.
    for child_id, child in expected_children.items():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])
  # For each recognized element tag, the attribute whose value uniquely
  # identifies a child among its siblings (used by _GetChildren).
  identifying_attribute = {
      'testsuites': 'name',
      'testsuite': 'name',
      'testcase':  'name',
      'failure':   'message',
      }
  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """
    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     'Encountered unknown element <%s>' % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        # First text/CDATA fragment creates the 'detail' entry; later
        # fragments are appended to it so all text compares as one blob.
        if 'detail' not in children:
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children['detail'] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children['detail'].nodeValue += child.nodeValue
      else:
        self.fail('Encountered unexpected node type %d' % child.nodeType)
    return children
  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.
    *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
       elements is replaced with a single asterisk, if it contains
       only digit characters.
    *  The "timestamp" attribute of <testsuites> elements is replaced with a
       single asterisk, if it contains a valid ISO8601 datetime value.
    *  The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it sn non-empty) as it is the type name returned
       by the compiler and is platform dependent.
    *  The line info reported in the first line of the "message"
       attribute and CDATA section of <failure> elements is replaced with the
       file's basename and a single asterisk for the line number.
    *  The directory names in file paths are removed.
    *  The stack traces are removed.
    """
    if element.tagName == 'testsuites':
      timestamp = element.getAttributeNode('timestamp')
      timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
                               '*', timestamp.value)
    if element.tagName in ('testsuites', 'testsuite', 'testcase'):
      time = element.getAttributeNode('time')
      time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
      type_param = element.getAttributeNode('type_param')
      if type_param and type_param.value:
        type_param.value = '*'
    elif element.tagName == 'failure':
      source_line_pat = r'^.*[/\\](.*:)\d+\n'
      # Replaces the source line information with a normalized form.
      message = element.getAttributeNode('message')
      message.value = re.sub(source_line_pat, '\\1*\n', message.value)
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Replaces the source line information with a normalized form.
          cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
                                   '', cdata)
    # Recurse into child elements so the whole tree is normalized.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
| apache-2.0 |
Asure/Dropad-kernel-2.6.32.9 | arch/ia64/scripts/unwcheck.py | 916 | 1718 | #!/usr/bin/env python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file whose unwind info to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# Allow overriding the readelf binary via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")
# Matches "<func>: [0xSTART-0xEND]" lines from `readelf -u` output.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches region-length annotations, e.g. "... rlen=NN".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify one function's unwind info: the number of instruction slots
    # derived from its address range must equal the sum of its region
    # lengths.  On mismatch, count and report the error.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        # func is False for the sentinel first call; fall back to the
        # address range (globals `start`/`end` set by the main loop).
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Running totals for the report; `func`/`slots`/`rlen_sum` describe the
# function currently being accumulated (func=False before the first one).
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Stream `readelf -u FILE`; a start-pattern line begins a new function
# (first flushing the previous one), rlen lines accumulate region lengths.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64: each 16-byte bundle holds 3 instruction slots.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function, which has no following start line.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
stefanv/scipy3 | scipy/weave/examples/vq.py | 12 | 9251 | """
"""
# C:\home\ej\wrk\scipy\weave\examples>python vq.py
# vq with 1000 observation, 10 features and 30 codes fo 100 iterations
# speed in python: 0.150119999647
# [25 29] [ 2.49147266 3.83021032]
# speed in standard c: 0.00710999965668
# [25 29] [ 2.49147266 3.83021032]
# speed up: 21.11
# speed inline/blitz: 0.0186300003529
# [25 29] [ 2.49147272 3.83021021]
# speed up: 8.06
# speed inline/blitz2: 0.00461000084877
# [25 29] [ 2.49147272 3.83021021]
# speed up: 32.56
from numpy import *
import sys
sys.path.insert(0,'..')
import scipy.weave.inline_tools as inline_tools
import scipy.weave.converters as converters
blitz_type_converters = converters.blitz
import scipy.weave.c_spec as c_spec
def vq(obs,code_book):
    """Vector quantization via inline blitz++ tensor expressions.
    Returns (code, distortion): for each observation row, the index of the
    nearest code-book row and the Euclidean distance to it.
    """
    # make sure we're looking at arrays.
    obs = asarray(obs)
    code_book = asarray(code_book)
    # check for 2d arrays and compatible sizes.
    obs_sh = shape(obs)
    code_book_sh = shape(code_book)
    assert(len(obs_sh) == 2 and len(code_book_sh) == 2)
    assert(obs_sh[1] == code_book_sh[1])
    # NOTE: `type` shadows the builtin; kept as-is, it is interpolated
    # into the C++ source below via %(type)s.
    type = c_spec.num_to_c_types[obs.typecode()]
    # band aid for now.
    ar_type = 'PyArray_FLOAT'
    code = """
        #line 37 "vq.py"
        // Use tensor notation.
        blitz::Array<%(type)s,2> dist_sq(Ncode_book[0],Nobs[0]);
        blitz::firstIndex i;
        blitz::secondIndex j;
        blitz::thirdIndex k;
        dist_sq = sum(pow2(obs(j,k) - code_book(i,k)),k);
        // Surely there is a better way to do this...
        PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);
        blitz::Array<int,1> code((int*)(py_code->data),
                                 blitz::shape(Nobs[0]), blitz::neverDeleteData);
        code = minIndex(dist_sq(j,i),j);
        PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);
        blitz::Array<float,1> min_dist((float*)(py_min_dist->data),
                                       blitz::shape(Nobs[0]), blitz::neverDeleteData);
        min_dist = sqrt(min(dist_sq(j,i),j));
        py::tuple results(2);
        results[0] = py_code;
        results[1] = py_min_dist;
        return_val = results;
        """ % locals()
    code, distortion = inline_tools.inline(code,['obs','code_book'],
                                           type_converters = blitz_type_converters,
                                           compiler = 'gcc',
                                           verbose = 1)
    return code, distortion
def vq2(obs,code_book):
    """ doesn't use blitz (except in conversion)
        ALSO DOES NOT HANDLE STRIDED ARRAYS CORRECTLY
    Same contract as vq(): returns (code, distortion) per observation,
    but the C++ body does the nearest-neighbour search with raw loops
    over contiguous data pointers.
    """
    # make sure we're looking at arrays.
    obs = asarray(obs)
    code_book = asarray(code_book)
    # check for 2d arrays and compatible sizes.
    obs_sh = shape(obs)
    code_book_sh = shape(code_book)
    assert(len(obs_sh) == 2 and len(code_book_sh) == 2)
    assert(obs_sh[1] == code_book_sh[1])
    assert(obs.typecode() == code_book.typecode())
    type = c_spec.num_to_c_types[obs.typecode()]
    # band aid for now.
    ar_type = 'PyArray_FLOAT'
    code =  """
            #line 83 "vq.py"
            // THIS DOES NOT HANDLE STRIDED ARRAYS CORRECTLY
            // Surely there is a better way to do this...
            PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);
            PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);
            int* raw_code = (int*)(py_code->data);
            float* raw_min_dist = (float*)(py_min_dist->data);
            %(type)s* raw_obs = obs.data();
            %(type)s* raw_code_book = code_book.data();
            %(type)s* this_obs = NULL;
            %(type)s* this_code = NULL;
            int Nfeatures = Nobs[1];
            float diff,dist;
            for(int i=0; i < Nobs[0]; i++)
            {
                this_obs = &raw_obs[i*Nfeatures];
                raw_min_dist[i] = (%(type)s)10000000.; // big number
                for(int j=0; j < Ncode_book[0]; j++)
                {
                    this_code = &raw_code_book[j*Nfeatures];
                    dist = 0;
                    for(int k=0; k < Nfeatures; k++)
                    {
                        diff = this_obs[k] - this_code[k];
                        dist += diff*diff;
                    }
                    dist = dist;
                    if (dist < raw_min_dist[i])
                    {
                        raw_code[i] = j;
                        raw_min_dist[i] = dist;
                    }
                }
                raw_min_dist[i] = sqrt(raw_min_dist[i]);
            }
            py::tuple results(2);
            results[0] = py_code;
            results[1] = py_min_dist;
            return_val = results;
            """ % locals()
    code, distortion = inline_tools.inline(code,['obs','code_book'],
                                           type_converters = blitz_type_converters,
                                           compiler = 'gcc',
                                           verbose = 1)
    return code, distortion
def vq3(obs,code_book):
    """ Uses standard array conversion completely bi-passing blitz.
        THIS DOES NOT HANDLE STRIDED ARRAYS CORRECTLY
    Same contract as vq()/vq2(); relies on weave's standard converters to
    expose obs_data/code_book_data pointers inside the C++ body.
    """
    # make sure we're looking at arrays.
    obs = asarray(obs)
    code_book = asarray(code_book)
    # check for 2d arrays and compatible sizes.
    obs_sh = shape(obs)
    code_book_sh = shape(code_book)
    assert(len(obs_sh) == 2 and len(code_book_sh) == 2)
    assert(obs_sh[1] == code_book_sh[1])
    assert(obs.typecode() == code_book.typecode())
    type = c_spec.num_to_c_types[obs.typecode()]
    code =  """
            #line 139 "vq.py"
            // Surely there is a better way to do this...
            PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);
            PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);
            int* code_data = (int*)(py_code->data);
            float* min_dist_data = (float*)(py_min_dist->data);
            %(type)s* this_obs = NULL;
            %(type)s* this_code = NULL;
            int Nfeatures = Nobs[1];
            float diff,dist;
            for(int i=0; i < Nobs[0]; i++)
            {
                this_obs = &obs_data[i*Nfeatures];
                min_dist_data[i] = (float)10000000.; // big number
                for(int j=0; j < Ncode_book[0]; j++)
                {
                    this_code = &code_book_data[j*Nfeatures];
                    dist = 0;
                    for(int k=0; k < Nfeatures; k++)
                    {
                        diff = this_obs[k] - this_code[k];
                        dist += diff*diff;
                    }
                    if (dist < min_dist_data[i])
                    {
                        code_data[i] = j;
                        min_dist_data[i] = dist;
                    }
                }
                min_dist_data[i] = sqrt(min_dist_data[i]);
            }
            py::tuple results(2);
            results[0] = py_code;
            results[1] = py_min_dist;
            return_val = results;
            """ % locals()
    # this is an unpleasant way to specify type factories -- work on it.
    # NOTE(review): ext_tools is imported but never used below.
    import ext_tools
    code, distortion = inline_tools.inline(code,['obs','code_book'])
    return code, distortion
import time
import RandomArray
def compare(m,Nobs,Ncodes,Nfeatures):
    """Benchmark the pure-Python, standard-C and three weave vq variants
    on random data, printing per-call time and speed-up vs. Python.
    :param m: number of timing iterations per variant.
    """
    obs = RandomArray.normal(0.,1.,(Nobs,Nfeatures))
    codes = RandomArray.normal(0.,1.,(Ncodes,Nfeatures))
    import scipy.cluster.vq
    scipy.cluster.vq
    print 'vq with %d observation, %d features and %d codes for %d iterations' % \
           (Nobs,Nfeatures,Ncodes,m)
    t1 = time.time()
    for i in range(m):
        code,dist = scipy.cluster.vq.py_vq(obs,codes)
    t2 = time.time()
    py = (t2-t1)
    print ' speed in python:', (t2 - t1)/m
    print code[:2],dist[:2]
    t1 = time.time()
    for i in range(m):
        code,dist = scipy.cluster.vq.vq(obs,codes)
    t2 = time.time()
    print ' speed in standard c:', (t2 - t1)/m
    print code[:2],dist[:2]
    print ' speed up: %3.2f' % (py/(t2-t1))
    # load into cache (first call pays the weave compile cost)
    b = vq(obs,codes)
    t1 = time.time()
    for i in range(m):
        code,dist = vq(obs,codes)
    t2 = time.time()
    print ' speed inline/blitz:',(t2 - t1)/ m
    print code[:2],dist[:2]
    print ' speed up: %3.2f' % (py/(t2-t1))
    # load into cache
    b = vq2(obs,codes)
    t1 = time.time()
    for i in range(m):
        code,dist = vq2(obs,codes)
    t2 = time.time()
    print ' speed inline/blitz2:',(t2 - t1)/ m
    print code[:2],dist[:2]
    print ' speed up: %3.2f' % (py/(t2-t1))
    # load into cache
    b = vq3(obs,codes)
    t1 = time.time()
    for i in range(m):
        code,dist = vq3(obs,codes)
    t2 = time.time()
    print ' speed using C arrays:',(t2 - t1)/ m
    print code[:2],dist[:2]
    print ' speed up: %3.2f' % (py/(t2-t1))
if __name__ == "__main__":
    # Benchmark: 100 iterations, 1000 observations, 30 codes, 10 features.
    compare(100,1000,30,10)
    #compare(1,10,2,10)
| bsd-3-clause |
akbargumbira/inasafe | safe/common/utilities.py | 3 | 20750 | # coding=utf-8
"""Utilities for InaSAFE."""
import os
import sys
import platform
from datetime import date
import getpass
from tempfile import mkstemp
from subprocess import PIPE, Popen
import ctypes
from numbers import Integral
import math
import colorsys
# pylint: disable=unused-import
from collections import OrderedDict
# pylint: enable=unused-import
from qgis.core import (
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsGeometry,
QgsPoint)
from safe.common.exceptions import VerificationError
from safe.utilities.rounding import (
thousand_separator, decimal_separator, add_separators)
import logging
LOGGER = logging.getLogger('InaSAFE')
class MEMORYSTATUSEX(ctypes.Structure):
    """This class is used for getting the free memory on Windows."""
    # Field order and ctypes widths must mirror the Win32 MEMORYSTATUSEX
    # struct passed to GlobalMemoryStatusEx (see get_free_memory_win).
    _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong)]
    def __init__(self):
        # have to initialize this to the size of MEMORYSTATUSEX
        # (the API uses dwLength to validate the caller's buffer).
        self.dwLength = ctypes.sizeof(self)
        super(MEMORYSTATUSEX, self).__init__()
def verify(statement, message=None):
    """Assertion-style check that raises instead of asserting.

    :param statement: Expression
    :type statement: type, bool

    :param message: error message in case statement evaluates as False
    :type message: str

    :raises: VerificationError
    """
    if not statement:
        # noinspection PyExceptionInherit
        raise VerificationError(message)
def safe_dir(sub_dir=None):
    """Return an absolute path under the safe package directory.

    :param sub_dir: Sub directory relative to safe package directory.
    :type sub_dir: str

    :return: The absolute path.
    :rtype: str
    """
    # The package root is one level above the directory of this module.
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, '../', sub_dir))
def temp_dir(sub_dir='work'):
    """Obtain the temporary working directory for the operating system.
    An inasafe subdirectory will automatically be created under this and
    if specified, a user subdirectory under that.
    .. note:: You can use this together with unique_filename to create
       a file in a temporary directory under the inasafe workspace. e.g.
       tmpdir = temp_dir('testing')
       tmpfile = unique_filename(dir=tmpdir)
       print tmpfile
       /tmp/inasafe/23-08-2012/timlinux/testing/tmpMRpF_C
    If you specify INASAFE_WORK_DIR as an environment var, it will be
    used in preference to the system temp directory.
    :param sub_dir: Optional argument which will cause an additional
        subdirectory to be created e.g. /tmp/inasafe/foo/
    :type sub_dir: str
    :return: Path to the temp dir that is created.
    :rtype: str
    :raises: Any errors from the underlying system calls.
    """
    # Spaces in user names would break downstream path handling.
    user = getpass.getuser().replace(' ', '_')
    current_date = date.today()
    date_string = current_date.isoformat()
    if 'INASAFE_WORK_DIR' in os.environ:
        new_directory = os.environ['INASAFE_WORK_DIR']
    else:
        # Following 4 lines are a workaround for tempfile.tempdir()
        # unreliabilty
        handle, filename = mkstemp()
        os.close(handle)
        new_directory = os.path.dirname(filename)
        os.remove(filename)
    path = os.path.join(new_directory, 'inasafe', date_string, user, sub_dir)
    if not os.path.exists(path):
        # Ensure that the dir is world writable
        # Umask sets the new mask and returns the old
        old_mask = os.umask(0000)
        os.makedirs(path, 0777)
        # Reinstate the old mask for tmp
        os.umask(old_mask)
    return path
def unique_filename(**kwargs):
    """Create new filename guaranteed not to exist previously
    Use mkstemp to create the file, then remove it and return the name
    If dir is specified, the tempfile will be created in the path specified
    otherwise the file will be created in a directory following this scheme:
    :file:'/tmp/inasafe/<dd-mm-yyyy>/<user>/impacts'
    See http://docs.python.org/library/tempfile.html for details.
    Example usage:
    tempdir = temp_dir(sub_dir='test')
    filename = unique_filename(suffix='.foo', dir=tempdir)
    print filename
    /tmp/inasafe/23-08-2012/timlinux/test/tmpyeO5VR.foo
    Or with no preferred subdir, a default subdir of 'impacts' is used:
    filename = unique_filename(suffix='.shp')
    print filename
    /tmp/inasafe/23-08-2012/timlinux/impacts/tmpoOAmOi.shp
    """
    # kwargs are forwarded to mkstemp; 'dir' is normalized through
    # temp_dir() so the file lands inside the inasafe workspace.
    if 'dir' not in kwargs:
        path = temp_dir('impacts')
        kwargs['dir'] = path
    else:
        path = temp_dir(kwargs['dir'])
        kwargs['dir'] = path
    if not os.path.exists(kwargs['dir']):
        # Ensure that the dir mask won't conflict with the mode
        # Umask sets the new mask and returns the old
        umask = os.umask(0000)
        # Ensure that the dir is world writable by explicitly setting mode
        os.makedirs(kwargs['dir'], 0777)
        # Reinstate the old mask for tmp dir
        os.umask(umask)
    # Now we have the working dir set up go on and return the filename
    handle, filename = mkstemp(**kwargs)
    # Need to close it using the file handle first for windows!
    os.close(handle)
    try:
        os.remove(filename)
    except OSError:
        pass
    return filename
def get_free_memory():
    """Return current free memory on the machine.

    Currently supported for Windows, Linux, MacOS.

    :returns: Free memory in MB unit
    :rtype: int
    """
    # Platform token -> handler table; checked in the same order as the
    # original chained conditionals.
    handlers = (
        ('win32', get_free_memory_win),
        ('linux2', get_free_memory_linux),
        ('darwin', get_free_memory_osx),
    )
    for token, handler in handlers:
        if token in sys.platform:
            return handler()
    # Unknown platform: implicitly return None (original behaviour).
def get_free_memory_win():
    """Return current free memory on the machine for windows.
    Warning : this script is really not robust
    Return in MB unit
    """
    stat = MEMORYSTATUSEX()
    # Fill the struct in place via the Win32 API, then convert the
    # available-physical-bytes field to whole megabytes.
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
    return int(stat.ullAvailPhys / 1024 / 1024)
def get_free_memory_linux():
    """Return current free memory on the machine for linux.
    Warning : this script is really not robust
    Return in MB unit
    """
    try:
        # Third line of `free -m` output holds the buffers/cache-adjusted
        # figures; the 4th field of that line is the free amount in MB.
        p = Popen('free -m', shell=True, stdout=PIPE)
        stdout_string = p.communicate()[0].split('\n')[2]
    except OSError:
        raise OSError
    stdout_list = stdout_string.split(' ')
    # Collapse runs of spaces by dropping empty tokens.
    stdout_list = [x for x in stdout_list if x != '']
    return int(stdout_list[3])
def get_free_memory_osx():
    """Return current free memory on the machine for mac os.
    Warning : this script is really not robust
    Return in MB unit
    """
    try:
        # Extract the PhysMem summary line from `top`.
        p = Popen('echo -e "\n$(top -l 1 | awk \'/PhysMem/\';)\n"',
                  shell=True, stdout=PIPE)
        stdout_string = p.communicate()[0].split('\n')[1]
        # e.g. output (its a single line) OSX 10.9 Mavericks
        # PhysMem: 6854M used (994M wired), 1332M unused.
        # output on Mountain lion
        # PhysMem: 1491M wired, 3032M active, 1933M inactive,
        # 6456M used, 1735M free.
    except OSError:
        raise OSError
    platform_version = platform.mac_ver()[0]
    # Might get '10.9.1' so strip off the last part
    parts = platform_version.split('.')
    platform_version = parts[0] + parts[1]
    # We make version a int by concatenating the two parts
    # so that we can successfully determine that 10.10 (release version)
    # is greater than e.g. 10.8 (release version)
    # 1010 vs 108
    platform_version = int(platform_version)
    if platform_version > 108:
        # Mavericks and later report "<n>M unused".
        stdout_list = stdout_string.split(',')
        unused = stdout_list[1].replace('M unused', '').replace(' ', '')
        unused = unused.replace('.', '')
        return int(unused)
    else:
        # Older releases report inactive and free separately; sum them.
        stdout_list = stdout_string.split(',')
        inactive = stdout_list[2].replace('M inactive', '').replace(' ', '')
        free = stdout_list[4].replace('M free.', '').replace(' ', '')
        return int(inactive) + int(free)
def humanize_min_max(min_value, max_value, interval):
    """Return humanize value format for max and min.
    If the range between the max and min is less than one, the original
    value will be returned.
    :param min_value: Minimum value
    :type min_value: int, float
    :param max_value: Maximim value
    :type max_value: int, float
    :param interval: The interval between classes in the
        class list where the results will be used.
    :type interval: float, int
    :returns: A two-tuple consisting of a string for min_value and a string for
        max_value.
    :rtype: tuple
    """
    current_interval = max_value - min_value
    if interval > 1:
        # Wide classes: round to whole numbers with thousand separators.
        # print 'case 1. Current interval : ', current_interval
        humanize_min_value = add_separators(int(round(min_value)))
        humanize_max_value = add_separators(int(round(max_value)))
    else:
        # Narrow classes: keep decimals sized to the current interval.
        # print 'case 2. Current interval : ', current_interval
        humanize_min_value = format_decimal(current_interval, min_value)
        humanize_max_value = format_decimal(current_interval, max_value)
    return humanize_min_value, humanize_max_value
def format_decimal(interval, value):
    """Return formatted decimal according to interval decimal place
    For example:
    interval = 0.33 (two decimal places)
    my_float = 1.1215454
    Return 1.12 (return only two decimal places as string)
    If interval is an integer return integer part of my_number
    If my_number is an integer return as is
    """
    interval = get_significant_decimal(interval)
    if isinstance(interval, Integral) or isinstance(value, Integral):
        return add_separators(int(value))
    if interval != interval:
        # nan (x != x is the standard NaN self-inequality check)
        return str(value)
    if value != value:
        # nan
        return str(value)
    # Mirror the interval's decimal precision on the value by string slicing.
    decimal_places = len(str(interval).split('.')[1])
    my_number_int = str(value).split('.')[0]
    my_number_decimal = str(value).split('.')[1][:decimal_places]
    # Drop an all-zero fractional part entirely.
    if len(set(my_number_decimal)) == 1 and my_number_decimal[-1] == '0':
        return my_number_int
    return (add_separators(int(my_number_int)) + decimal_separator() +
            my_number_decimal)
def get_significant_decimal(my_decimal):
    """Return a truncated decimal by last three digit after leading zero."""
    if isinstance(my_decimal, Integral):
        return my_decimal
    if my_decimal != my_decimal:
        # nan (NaN is the only value unequal to itself)
        return my_decimal
    my_int_part = str(my_decimal).split('.')[0]
    my_decimal_part = str(my_decimal).split('.')[1]
    # Find the index of the first non-zero digit after the decimal point.
    first_not_zero = 0
    for i in xrange(len(my_decimal_part)):
        if my_decimal_part[i] == '0':
            continue
        else:
            first_not_zero = i
            break
    my_truncated_decimal = my_decimal_part[:first_not_zero + 3]
    # rounding
    # NOTE(review): the slice below takes the same PREFIX as above (the
    # trailing ':' is a no-op), so my_leftover_number is int(0.xxx) == 0
    # and round_up is never True — the rounding branch looks unreachable.
    my_leftover_number = my_decimal_part[:first_not_zero + 3:]
    my_leftover_number = int(float('0.' + my_leftover_number))
    round_up = False
    if my_leftover_number == 1:
        round_up = True
    my_truncated = float(my_int_part + '.' + my_truncated_decimal)
    if round_up:
        # NOTE(review): '^' is bitwise XOR in Python, not exponentiation;
        # this was presumably meant to be 10 ** -(first_not_zero + 4).
        my_bonus = 1 * 10 ^ (-(first_not_zero + 4))
        my_truncated += my_bonus
    return my_truncated
def humanize_class(my_classes):
    """Return humanize interval of an array.

    For example::

        Original Array:          Result:
        1.1 - 5754.1             0 - 1
        5754.1 - 11507.1         1 - 5,754
                                 5,754 - 11,507

        Original Array:          Result:
        0.1 - 0.5                0 - 0.1
        0.5 - 0.9                0.1 - 0.5
                                 0.5 - 0.9

        Original Array:          Result:
        7.1 - 7.5                0 - 7.1
        7.5 - 7.9                7.1 - 7.5
                                 7.5 - 7.9

        Original Array:          Result:
        6.1 - 7.2                0 - 6
        7.2 - 8.3                6 - 7
        8.3 - 9.4                7 - 8
                                 8 - 9

    :param my_classes: Class break values; the lower bound of the first
        class is implicitly 0.
    :returns: List of (min, max) string tuples formatted by
        humanize_min_max().
    """
    min_value = 0
    if min_value - my_classes[0] == 0:
        if len(my_classes) == 1:
            return [('0', '0')]
        else:
            # First break equals the implicit lower bound: drop it and
            # re-humanize the remaining breaks.
            return humanize_class(my_classes[1:])
    humanize_classes = []
    # Assumes evenly spaced breaks: the last gap is used as the interval
    # that drives decimal formatting for every class.
    interval = my_classes[-1] - my_classes[-2]
    for max_value in my_classes:
        humanize_classes.append(
            humanize_min_max(min_value, max_value, interval))
        min_value = max_value
        try:
            # If formatting collapsed this class (min label == max label),
            # fall back to the unformatted representation for all classes.
            # NOTE(review): indentation was reconstructed -- the
            # ``continue`` requires this try/except to live inside the
            # loop; confirm against upstream InaSAFE source.
            if humanize_classes[-1][0] == humanize_classes[-1][-1]:
                return unhumanize_class(my_classes)
        except IndexError:
            continue
    return humanize_classes
def unhumanize_class(my_classes):
    """Return the class breaks as (min, max) intervals without humanizing.

    Every break in ``my_classes`` becomes the upper bound of one interval;
    the first interval starts at 0.  Bounds are formatted with
    format_decimal() using the last gap between breaks as the interval.

    :param my_classes: Class break values.
    :returns: List of (min, max) string tuples.
    """
    interval = my_classes[-1] - my_classes[-2]
    bounds = [0] + list(my_classes)
    return [
        (format_decimal(interval, lower), format_decimal(interval, upper))
        for lower, upper in zip(bounds[:-1], bounds[1:])
    ]
def unhumanize_number(number):
    """Return number without formatting.

    If something goes wrong in the conversion just return the passed number.
    We catch AttributeError in case the number has no replace method, which
    means it is not a string but already an int or float.
    We catch ValueError if number is a string but not parseable to a number,
    like the 'no data' case.

    :param number: A possibly separator-formatted number (str, int or float).
    :returns: The number as an int, or the input when conversion fails.
    """
    try:
        number = number.replace(thousand_separator(), '')
        # NOTE(review): if int(float(...)) raises ValueError the separator
        # stripping above has already been applied, so the *stripped*
        # string is returned, not the original -- confirm callers rely on
        # (or at least tolerate) this.
        number = int(float(number))
    except (AttributeError, ValueError):
        pass
    return number
def create_label(label_tuple, extra_label=None):
    """Return a bracketed range label, optionally suffixed with extra text.

    :param label_tuple: Pair of strings (a, b).
    :param extra_label: Optional extra text appended after the brackets.
    :returns: '[a - b]' or '[a - b] extra_label'.
    """
    label = '[' + ' - '.join(label_tuple) + ']'
    if extra_label is None:
        return label
    return label + ' ' + str(extra_label)
def get_utm_zone(longitude):
    """Return the UTM zone number (1-60) for a longitude in degrees.

    :param longitude: The longitude.
    :type longitude: float

    :returns: The UTM zone.
    :rtype: int
    """
    zone = int((math.floor((longitude + 180.0) / 6.0) + 1) % 60)
    # Zone numbering wraps: a modulo result of 0 means zone 60.
    return zone or 60
def get_utm_epsg(longitude, latitude, crs=None):
    """Return the EPSG code of the UTM zone for the given coordinates.

    By default the CRS is EPSG:4326.  If a CRS is provided, X, Y are first
    reprojected from that CRS to WGS84.

    The code is based on:
    http://gis.stackexchange.com/questions/34401

    :param longitude: The longitude.
    :type longitude: float

    :param latitude: The latitude.
    :type latitude: float

    :param crs: The coordinate reference system of the latitude, longitude.
    :type crs: QgsCoordinateReferenceSystem
    """
    if crs is not None and crs.authid() != 'EPSG:4326':
        # Reproject to WGS84 first, then resolve the zone recursively.
        epsg_4326 = QgsCoordinateReferenceSystem('EPSG:4326')
        transform = QgsCoordinateTransform(crs, epsg_4326)
        geom = QgsGeometry.fromPoint(QgsPoint(longitude, latitude))
        geom.transform(transform)
        point = geom.asPoint()
        return get_utm_epsg(point.x(), point.y())
    # Northern-hemisphere UTM zones are EPSG 326xx, southern ones 327xx.
    epsg = 32700 if latitude < 0.0 else 32600
    return epsg + get_utm_zone(longitude)
def which(name, flags=os.X_OK):
    """Search PATH for executable files with the given name.

    ..note:: This function was taken verbatim from the twisted framework,
      licence available here:
      http://twistedmatrix.com/trac/browser/tags/releases/twisted-8.2.0/LICENSE

    On newer versions of MS-Windows, the PATHEXT environment variable will be
    set to the list of file extensions for files considered executable. This
    will normally include things like ".EXE". This function will also find
    files with the given name ending with any of these extensions.

    On MS-Windows the only flag that has any meaning is os.F_OK. Any other
    flags will be ignored.

    :param name: The name for which to search.
    :type name: C{str}

    :param flags: Arguments to L{os.access}.
    :type flags: C{int}

    :returns: A list of the full paths to files found, in the order in which
        they were found.
    :rtype: C{list}
    """
    result = []
    # pylint: disable=W0141
    # NOTE(review): under Python 3 filter() returns a one-shot iterator that
    # would be exhausted after the first PATH entry; this code targets
    # Python 2 where it is a list.
    extensions = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
    # pylint: enable=W0141
    path = os.environ.get('PATH', None)
    # In c6c9b26 we removed this hard coding for issue #529 but I am
    # adding it back here in case the user's path does not include the
    # gdal binary dir on OSX but it is actually there. (TS)
    # NOTE(review): if PATH is unset, this interpolates the string 'None'
    # into the search path on darwin before the None-check below can fire.
    if sys.platform == 'darwin':  # Mac OS X
        gdal_prefix = (
            '/Library/Frameworks/GDAL.framework/'
            'Versions/Current/Programs/')
        path = '%s:%s' % (path, gdal_prefix)
    LOGGER.debug('Search path: %s' % path)
    if path is None:
        return []
    for p in path.split(os.pathsep):
        p = os.path.join(p, name)
        if os.access(p, flags):
            result.append(p)
        # Also try every Windows executable extension (PATHEXT).
        for e in extensions:
            path_extensions = p + e
            if os.access(path_extensions, flags):
                result.append(path_extensions)
    return result
def color_ramp(number_of_colour):
    """Generate a list of colours in hexadecimal '#rrggbb' notation.

    This generates colours in the HSL model by varying the hue,
    see: https://coderwall.com/p/dvsxwg/smoothly-transition-from-green-to-red

    :param number_of_colour: The number of intervals between R and G spectrum.
    :type number_of_colour: int

    :returns: List of colours.
    :rtype: list

    :raises: Exception if number_of_colour is less than 1.
    """
    if number_of_colour < 1:
        raise Exception('The number of colours should be > 0')
    colors = []
    if number_of_colour == 1:
        hue_interval = 1
    else:
        hue_interval = 1.0 / (number_of_colour - 1)
    for i in range(number_of_colour):
        # Restrict the hue to [0, 1/3]: the red-to-green arc of the wheel.
        hue = (i * hue_interval) / 3
        # NOTE(review): lightness/saturation are far outside colorsys'
        # documented [0, 1] range; these particular values make
        # hls_to_rgb() return channels scaled to [0, 255] directly.
        light = 127.5
        saturation = -1.007905138339921
        rgb = colorsys.hls_to_rgb(hue, light, saturation)
        # BUG FIX: the channels are floats and '%x' requires integers on
        # Python 3 (float support was deprecated on Python 2); truncate
        # explicitly, matching the old implicit conversion.
        hex_color = '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))
        colors.append(hex_color)
    return colors
def log_file_path():
    """Get InaSAFE log file path.

    :return: InaSAFE log file path.
    :rtype: str
    """
    return os.path.join(temp_dir('logs'), 'inasafe.log')
def romanise(number):
    """Return the roman numeral for a number.

    Note that this only works for numbers in the interval range [0, 12]
    since at the moment we only use it on realtime earthquake to convert
    MMI values.

    :param number: The number that will be romanised, or None.
    :type number: float

    :return: Roman numeral equivalent of the value, '' for None, or None
        when the value cannot be represented (non-numeric or outside
        [0, 12]).
    :rtype: str
    """
    if number is None:
        return ''
    roman_list = ['0', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII',
                  'IX', 'X', 'XI', 'XII']
    try:
        index = int(number)
    except ValueError:
        # Non-numeric strings (and NaN) cannot be romanised.
        return None
    if not 0 <= index < len(roman_list):
        # BUG FIX: values above 12 used to raise an uncaught IndexError and
        # negative values silently wrapped around (e.g. -1 -> 'XII'),
        # contradicting the documented [0, 12] contract.
        return None
    return roman_list[index]
def humanize_file_size(size):
    """Return humanized size from bytes.

    :param size: The size to humanize, in bytes.
    :type size: float

    :return: Human readable size.
    :rtype: unicode
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            return u'%3.1f %s' % (size, unit)
        size /= 1024.0
    # BUG FIX: sizes of 1024 TB and over used to fall off the end of the
    # loop and implicitly return None; report them in petabytes instead.
    return u'%3.1f %s' % (size, 'PB')
def add_to_list(my_list, my_element):
    """Add a new element to a list based on its type, keeping elements unique.

    Scalars (e.g. strings) are appended when not already present; lists are
    merged recursively, element by element.

    :param my_list: A list.
    :type my_list: list

    :param my_element: A new element, or a (possibly nested) list of them.
    :type my_element: str, list

    :returns: A list with unique elements.
    :rtype: list
    """
    if not isinstance(my_element, list):
        if my_element not in my_list:
            my_list.append(my_element)
        return my_list
    for item in my_element:
        my_list = add_to_list(my_list, item)
    return my_list
| gpl-3.0 |
emailweixu/Paddle | python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py | 15 | 1164 | from paddle.trainer_config_helpers import *
# Network definition exercising the simple recurrent layer types:
# plain RNN, LSTM and GRU, each in forward and reverse direction.
settings(batch_size=1000, learning_rate=1e-4)

din = data_layer(name='data', size=200)

hidden = fc_layer(input=din, size=200, act=SigmoidActivation())

rnn = recurrent_layer(input=hidden, act=SigmoidActivation())
rnn2 = recurrent_layer(input=hidden, act=SigmoidActivation(), reverse=True)

# lstmemory expects a projection of size 4 * hidden (hence 200 * 4).
lstm1_param = fc_layer(
    input=hidden, size=200 * 4, act=LinearActivation(), bias_attr=False)
lstm1 = lstmemory(input=lstm1_param, act=SigmoidActivation())

lstm2_param = fc_layer(
    input=hidden, size=200 * 4, act=LinearActivation(), bias_attr=False)
lstm2 = lstmemory(input=lstm2_param, act=SigmoidActivation(), reverse=True)

# grumemory expects a projection of size 3 * hidden (hence 200 * 3).
gru1_param = fc_layer(
    input=hidden, size=200 * 3, act=LinearActivation(), bias_attr=False)
gru1 = grumemory(input=gru1_param, act=SigmoidActivation())

gru2_param = fc_layer(
    input=hidden, size=200 * 3, act=LinearActivation(), bias_attr=False)
gru2 = grumemory(input=gru2_param, act=SigmoidActivation(), reverse=True)

outputs(
    last_seq(input=rnn),
    first_seq(input=rnn2),
    last_seq(input=lstm1),
    first_seq(input=lstm2),
    last_seq(input=gru1),
    # Consistency fix: pass the layer via the named ``input`` argument like
    # every other output above (it was previously passed positionally).
    first_seq(input=gru2))
| apache-2.0 |
ojengwa/odoo | addons/lunch/tests/__init__.py | 260 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_lunch
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zakuro9715/lettuce | lettuce/django/management/commands/harvest.py | 7 | 10191 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import django
from distutils.version import StrictVersion
from optparse import make_option
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.test.utils import setup_test_environment
from django.test.utils import teardown_test_environment
from lettuce import Runner
from lettuce import registry
from lettuce.core import SummaryTotalResults
from lettuce.django import harvest_lettuces, get_server
from lettuce.django.server import LettuceServerException
class Command(BaseCommand):
    """Django management command that harvests and runs lettuce features."""

    help = u'Run lettuce tests all along installed apps'
    args = '[PATH to feature file or folder]'
    requires_model_validation = False

    option_list = BaseCommand.option_list + (
        make_option('-a', '--apps', action='store', dest='apps', default='',
                    help='Run ONLY the django apps that are listed here. Comma separated'),
        make_option('-A', '--avoid-apps', action='store', dest='avoid_apps', default='',
                    help='AVOID running the django apps that are listed here. Comma separated'),
        make_option('-S', '--no-server', action='store_true', dest='no_server', default=False,
                    help="will not run django's builtin HTTP server"),
        make_option('--nothreading', action='store_false', dest='use_threading', default=True,
                    help='Tells Django to NOT use threading.'),
        make_option('-T', '--test-server', action='store_true', dest='test_database',
                    default=getattr(settings, "LETTUCE_USE_TEST_DATABASE", False),
                    help="will run django's builtin HTTP server using the test databases"),
        make_option('-P', '--port', type='int', dest='port',
                    help="the port in which the HTTP server will run at"),
        make_option('-d', '--debug-mode', action='store_true', dest='debug', default=False,
                    help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"),
        make_option('-s', '--scenarios', action='store', dest='scenarios', default=None,
                    help='Comma separated list of scenarios to run'),
        make_option("-t", "--tag",
                    dest="tags",
                    type="str",
                    action='append',
                    default=None,
                    help='Tells lettuce to run the specified tags only; '
                         'can be used multiple times to define more tags'
                         '(prefixing tags with "-" will exclude them and '
                         'prefixing with "~" will match approximate words)'),
        make_option('--with-xunit', action='store_true', dest='enable_xunit', default=False,
                    help='Output JUnit XML test results to a file'),
        make_option('--smtp-queue', action='store_true', dest='smtp_queue', default=False,
                    help='Use smtp for mail queue (usefull with --no-server option'),
        make_option('--xunit-file', action='store', dest='xunit_file', default=None,
                    help='Write JUnit XML to this file. Defaults to lettucetests.xml'),
        make_option('--with-subunit',
                    action='store_true',
                    dest='enable_subunit',
                    default=False,
                    help='Output Subunit test results to a file'),
        make_option('--subunit-file',
                    action='store',
                    dest='subunit_file',
                    default=None,
                    help='Write Subunit to this file. Defaults to subunit.bin'),
        make_option("--failfast", dest="failfast", default=False,
                    action="store_true", help='Stop running in the first failure'),
        make_option("--pdb", dest="auto_pdb", default=False,
                    action="store_true", help='Launches an interactive debugger upon error'),
    )

    def create_parser(self, prog_name, subcommand):
        """Build the option parser, replacing Django's -v with lettuce's 0-4 scale."""
        parser = super(Command, self).create_parser(prog_name, subcommand)
        parser.remove_option('-v')
        help_text = ('Verbosity level; 0=no output, 1=only dots, 2=only '
                     'scenario names, 3=normal output, 4=normal output '
                     '(colorful, deprecated)')
        parser.add_option('-v', '--verbosity',
                          action='store',
                          dest='verbosity',
                          default='3',
                          type='choice',
                          choices=map(str, range(5)),
                          help=help_text)
        if StrictVersion(django.get_version()) < StrictVersion('1.7'):
            # Django 1.7 introduces the --no-color flag. We must add the flag
            # to be compatible with older django versions
            parser.add_option('--no-color',
                              action='store_true',
                              dest='no_color',
                              default=False,
                              help="Don't colorize the command output.")
        return parser

    def stopserver(self, failed=False):
        # Exit code 0 on success, 1 on failure.
        raise SystemExit(int(failed))

    def get_paths(self, args, apps_to_run, apps_to_avoid):
        """Return feature paths: explicit CLI paths, or harvested app dirs."""
        if args:
            # Validate every path given on the command line before running.
            for path, exists in zip(args, map(os.path.exists, args)):
                if not exists:
                    sys.stderr.write("You passed the path '%s', but it does not exist.\n" % path)
                    sys.exit(1)
            else:
                # for/else: runs when the loop was not broken out of, i.e.
                # once all paths were validated.
                paths = args
        else:
            paths = harvest_lettuces(apps_to_run, apps_to_avoid)  # list of tuples with (path, app_module)
        return paths

    def handle(self, *args, **options):
        """Run all harvested features, optionally serving HTTP and using test DBs."""
        setup_test_environment()
        verbosity = int(options.get('verbosity', 3))
        no_color = int(options.get('no_color', False))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        test_database = options.get('test_database', False)
        smtp_queue = options.get('smtp_queue', False)
        tags = options.get('tags', None)
        failfast = options.get('failfast', False)
        auto_pdb = options.get('auto_pdb', False)
        threading = options.get('use_threading', True)
        # NOTE(review): assigned but never used in this method.
        with_summary = options.get('summary_display', False)

        if test_database:
            # Spin up Django's test databases (with optional South migrations).
            migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except:
                # South is not installed/usable; skip its migrations.
                migrate_south = False
                pass

            from django.test.utils import get_runner
            self._testrunner = get_runner(settings)(interactive=False)
            self._testrunner.setup_test_environment()
            self._old_db_config = self._testrunner.setup_databases()

            call_command('syncdb', verbosity=0, interactive=False,)
            if migrate_south:
                call_command('migrate', verbosity=0, interactive=False,)

        settings.DEBUG = options.get('debug', False)

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        server = get_server(port=options['port'], threading=threading)

        if run_server:
            try:
                server.start()
            except LettuceServerException as e:
                raise SystemExit(e)

        # Expose the server address to steps via the environment.
        os.environ['SERVER_NAME'] = str(server.address)
        os.environ['SERVER_PORT'] = str(server.port)

        failed = False
        registry.call_hook('before', 'harvest', locals())
        results = []
        try:
            for path in paths:
                app_module = None
                # Harvested entries are (path, app_module) tuples.
                # NOTE(review): ``len(path) is 2`` relies on small-int
                # identity; ``== 2`` would be the correct comparison.
                if isinstance(path, tuple) and len(path) is 2:
                    path, app_module = path

                if app_module is not None:
                    registry.call_hook('before_each', 'app', app_module)

                runner = Runner(path, options.get('scenarios'),
                                verbosity, no_color,
                                enable_xunit=options.get('enable_xunit'),
                                enable_subunit=options.get('enable_subunit'),
                                xunit_filename=options.get('xunit_file'),
                                subunit_filename=options.get('subunit_file'),
                                tags=tags, failfast=failfast, auto_pdb=auto_pdb,
                                smtp_queue=smtp_queue)

                result = runner.run()
                if app_module is not None:
                    registry.call_hook('after_each', 'app', app_module, result)

                results.append(result)
                if not result or result.steps != result.steps_passed:
                    failed = True
        except SystemExit as e:
            failed = e.code
        except Exception as e:
            failed = True
            import traceback
            traceback.print_exc(e)
        finally:
            # Always summarize, tear down test databases and stop the server.
            summary = SummaryTotalResults(results)
            summary.summarize_all()
            registry.call_hook('after', 'harvest', summary)

            if test_database:
                self._testrunner.teardown_databases(self._old_db_config)

            teardown_test_environment()
            server.stop(failed)
            raise SystemExit(int(failed))
| gpl-3.0 |
ArnossArnossi/django | tests/template_tests/test_loaders.py | 263 | 14253 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import sys
import tempfile
import types
import unittest
from contextlib import contextmanager
from django.template import Context, TemplateDoesNotExist
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .utils import TEMPLATE_DIR
try:
import pkg_resources
except ImportError:
pkg_resources = None
class CachedLoaderTests(SimpleTestCase):
    """Tests for the cached template loader wrapping a filesystem loader."""

    def setUp(self):
        self.engine = Engine(
            dirs=[TEMPLATE_DIR],
            loaders=[
                ('django.template.loaders.cached.Loader', [
                    'django.template.loaders.filesystem.Loader',
                ]),
            ],
        )

    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])

        cache = self.engine.template_loaders[0].get_template_cache
        self.assertEqual(cache['index.html'], template)

        # Run a second time from cache
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])

    def test_get_template_missing(self):
        # The TemplateDoesNotExist instance itself is cached.
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('doesnotexist.html')
        e = self.engine.template_loaders[0].get_template_cache['doesnotexist.html']
        self.assertEqual(e.args[0], 'doesnotexist.html')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template(self):
        loader = self.engine.template_loaders[0]
        template, origin = loader.load_template('index.html')
        self.assertEqual(template.origin.template_name, 'index.html')

        cache = self.engine.template_loaders[0].template_cache
        self.assertEqual(cache['index.html'][0], template)

        # Run a second time from cache
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template('index.html')
        self.assertEqual(template.origin.template_name, 'index.html')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_missing(self):
        """
        #19949 -- TemplateDoesNotExist exceptions should be cached.
        """
        loader = self.engine.template_loaders[0]

        self.assertFalse('missing.html' in loader.template_cache)

        with self.assertRaises(TemplateDoesNotExist):
            loader.load_template("missing.html")

        self.assertEqual(
            loader.template_cache["missing.html"],
            TemplateDoesNotExist,
            "Cached loader failed to cache the TemplateDoesNotExist exception",
        )

    def test_templatedir_caching(self):
        """
        #13573 -- Template directories should be part of the cache key.
        """
        # Retrieve a template specifying a template directory to check
        t1, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'first'),))
        # Now retrieve the same template name, but from a different directory
        t2, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'second'),))

        # The two templates should not have the same content
        self.assertNotEqual(t1.render(Context({})), t2.render(Context({})))
@unittest.skipUnless(pkg_resources, 'setuptools is not installed')
class EggLoaderTests(SimpleTestCase):
    """Tests for the (deprecated) egg template loader."""

    @contextmanager
    def create_egg(self, name, resources):
        """
        Creates a mock egg with a list of resources.

        name: The name of the module.
        resources: A dictionary of template names mapped to file-like objects.
        """
        if six.PY2:
            name = name.encode('utf-8')

        class MockLoader(object):
            pass

        class MockProvider(pkg_resources.NullProvider):
            # Minimal pkg_resources provider backed by the in-memory
            # ``resources`` dict instead of files on disk.
            def __init__(self, module):
                pkg_resources.NullProvider.__init__(self, module)
                self.module = module

            def _has(self, path):
                return path in self.module._resources

            def _isdir(self, path):
                return False

            def get_resource_stream(self, manager, resource_name):
                return self.module._resources[resource_name]

            def _get(self, path):
                return self.module._resources[path].read()

            def _fn(self, base, resource_name):
                return os.path.normcase(resource_name)

        egg = types.ModuleType(name)
        egg.__loader__ = MockLoader()
        egg.__path__ = ['/some/bogus/path/']
        egg.__file__ = '/some/bogus/path/__init__.pyc'
        egg._resources = resources
        sys.modules[name] = egg
        pkg_resources._provider_factories[MockLoader] = MockProvider

        try:
            yield
        finally:
            # Undo the global registrations made above.
            del sys.modules[name]
            del pkg_resources._provider_factories[MockLoader]

    @classmethod
    @ignore_warnings(category=RemovedInDjango20Warning)
    def setUpClass(cls):
        cls.engine = Engine(loaders=[
            'django.template.loaders.eggs.Loader',
        ])
        cls.loader = cls.engine.template_loaders[0]
        super(EggLoaderTests, cls).setUpClass()

    def test_get_template(self):
        templates = {
            os.path.normcase('templates/y.html'): six.StringIO("y"),
        }

        with self.create_egg('egg', templates):
            with override_settings(INSTALLED_APPS=['egg']):
                template = self.engine.get_template("y.html")

                self.assertEqual(template.origin.name, 'egg:egg:templates/y.html')
                self.assertEqual(template.origin.template_name, 'y.html')
                self.assertEqual(template.origin.loader, self.engine.template_loaders[0])

                output = template.render(Context({}))
                self.assertEqual(output, "y")

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_source(self):
        loader = self.engine.template_loaders[0]
        templates = {
            os.path.normcase('templates/y.html'): six.StringIO("y"),
        }

        with self.create_egg('egg', templates):
            with override_settings(INSTALLED_APPS=['egg']):
                source, name = loader.load_template_source('y.html')

                self.assertEqual(source.strip(), 'y')
                self.assertEqual(name, 'egg:egg:templates/y.html')

    def test_non_existing(self):
        """
        Template loading fails if the template is not in the egg.
        """
        with self.create_egg('egg', {}):
            with override_settings(INSTALLED_APPS=['egg']):
                with self.assertRaises(TemplateDoesNotExist):
                    self.engine.get_template('not-existing.html')

    def test_not_installed(self):
        """
        Template loading fails if the egg is not in INSTALLED_APPS.
        """
        templates = {
            os.path.normcase('templates/y.html'): six.StringIO("y"),
        }
        with self.create_egg('egg', templates):
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.get_template('y.html')
class FileSystemLoaderTests(SimpleTestCase):
    """Tests for the filesystem template loader (paths, encodings, errors)."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(dirs=[TEMPLATE_DIR])
        super(FileSystemLoaderTests, cls).setUpClass()

    @contextmanager
    def set_dirs(self, dirs):
        # Temporarily swap the engine's template directories.
        original_dirs = self.engine.dirs
        self.engine.dirs = dirs
        try:
            yield
        finally:
            self.engine.dirs = original_dirs

    @contextmanager
    def source_checker(self, dirs):
        loader = self.engine.template_loaders[0]

        def check_sources(path, expected_sources):
            expected_sources = [os.path.abspath(s) for s in expected_sources]
            self.assertEqual(
                [origin.name for origin in loader.get_template_sources(path)],
                expected_sources,
            )

        with self.set_dirs(dirs):
            yield check_sources

    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
        self.assertEqual(template.origin.loader_name, 'django.template.loaders.filesystem.Loader')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_source(self):
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template_source('index.html')
        self.assertEqual(source.strip(), 'index')
        self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))

    def test_directory_security(self):
        # Paths escaping the template dirs must yield no sources.
        with self.source_checker(['/dir1', '/dir2']) as check_sources:
            check_sources('index.html', ['/dir1/index.html', '/dir2/index.html'])
            check_sources('/etc/passwd', [])
            check_sources('etc/passwd', ['/dir1/etc/passwd', '/dir2/etc/passwd'])
            check_sources('../etc/passwd', [])
            check_sources('../../../etc/passwd', [])
            check_sources('/dir1/index.html', ['/dir1/index.html'])
            check_sources('../dir2/index.html', ['/dir2/index.html'])
            check_sources('/dir1blah', [])
            check_sources('../dir1blah', [])

    def test_unicode_template_name(self):
        with self.source_checker(['/dir1', '/dir2']) as check_sources:
            # UTF-8 bytestrings are permitted.
            check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/dir1/Ångström', '/dir2/Ångström'])
            # Unicode strings are permitted.
            check_sources('Ångström', ['/dir1/Ångström', '/dir2/Ångström'])

    def test_utf8_bytestring(self):
        """
        Invalid UTF-8 encoding in bytestrings should raise a useful error
        """
        engine = Engine()
        loader = engine.template_loaders[0]
        with self.assertRaises(UnicodeDecodeError):
            list(loader.get_template_sources(b'\xc3\xc3', ['/dir1']))

    def test_unicode_dir_name(self):
        with self.source_checker([b'/Stra\xc3\x9fe']) as check_sources:
            check_sources('Ångström', ['/Straße/Ångström'])
            check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/Straße/Ångström'])

    @unittest.skipUnless(
        os.path.normcase('/TEST') == os.path.normpath('/test'),
        "This test only runs on case-sensitive file systems.",
    )
    def test_case_sensitivity(self):
        with self.source_checker(['/dir1', '/DIR2']) as check_sources:
            check_sources('index.html', ['/dir1/index.html', '/DIR2/index.html'])
            check_sources('/DIR1/index.HTML', ['/DIR1/index.HTML'])

    def test_file_does_not_exist(self):
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('doesnotexist.html')

    @unittest.skipIf(
        sys.platform == 'win32',
        "Python on Windows doesn't have working os.chmod().",
    )
    def test_permissions_error(self):
        with tempfile.NamedTemporaryFile() as tmpfile:
            tmpdir = os.path.dirname(tmpfile.name)
            tmppath = os.path.join(tmpdir, tmpfile.name)
            os.chmod(tmppath, 0o0222)
            with self.set_dirs([tmpdir]):
                with self.assertRaisesMessage(IOError, 'Permission denied'):
                    self.engine.get_template(tmpfile.name)

    def test_notafile_error(self):
        # 'first' is a directory inside TEMPLATE_DIR, not a file.
        with self.assertRaises(IOError):
            self.engine.get_template('first')
class AppDirectoriesLoaderTests(SimpleTestCase):
    """Tests for the app_directories loader (templates inside installed apps)."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(
            loaders=['django.template.loaders.app_directories.Loader'],
        )
        super(AppDirectoriesLoaderTests, cls).setUpClass()

    @override_settings(INSTALLED_APPS=['template_tests'])
    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])

    @ignore_warnings(category=RemovedInDjango20Warning)
    @override_settings(INSTALLED_APPS=['template_tests'])
    def test_load_template_source(self):
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template_source('index.html')
        self.assertEqual(source.strip(), 'index')
        self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))

    @override_settings(INSTALLED_APPS=[])
    def test_not_installed(self):
        # Templates of apps not in INSTALLED_APPS must not be found.
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('index.html')
class LocmemLoaderTests(SimpleTestCase):
    """Tests for the in-memory (locmem) template loader."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(
            loaders=[('django.template.loaders.locmem.Loader', {
                'index.html': 'index',
            })],
        )
        super(LocmemLoaderTests, cls).setUpClass()

    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, 'index.html')
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_source(self):
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template_source('index.html')
        self.assertEqual(source.strip(), 'index')
        self.assertEqual(name, 'index.html')
| bsd-3-clause |
Manojkumar91/odoo_inresto | addons/website_customer/__init__.py | 316 | 1024 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
| agpl-3.0 |
cxysteven/Paddle | demo/sentiment/dataprovider.py | 5 | 1398 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
def hook(settings, dictionary, **kwargs):
    # Init hook: store the word dictionary passed from the trainer config
    # and declare the two input slots -- a word-id sequence and a binary
    # sentiment label (2 classes).
    settings.word_dict = dictionary
    settings.input_types = [
        integer_value_sequence(len(settings.word_dict)), integer_value(2)
    ]
    settings.logger.info('dict len : %d' % (len(settings.word_dict)))
@provider(init_hook=hook)
def process(settings, file_name):
    # Yield (word_id_sequence, label) samples from a data file where each
    # line is '<label>\t\t<comment>'.  Words missing from the dictionary
    # are dropped; lines with no known words are skipped entirely.
    with open(file_name, 'r') as fdata:
        for line_count, line in enumerate(fdata):
            label, comment = line.strip().split('\t\t')
            label = int(label)
            words = comment.split()
            word_slot = [
                settings.word_dict[w] for w in words if w in settings.word_dict
            ]
            if not word_slot:
                continue
            yield word_slot, label
| apache-2.0 |
tbabej/freeipa | ipatests/test_webui/test_dns.py | 5 | 4696 | # Authors:
# Petr Vobornik <pvoborni@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DNS tests
"""
from ipatests.test_webui.ui_driver import UI_driver
from ipatests.test_webui.ui_driver import screenshot
import pytest
# Entity and facet identifiers as used by the web UI navigation.
ZONE_ENTITY = 'dnszone'
FORWARD_ZONE_ENTITY = 'dnsforwardzone'
RECORD_ENTITY = 'dnsrecord'
CONFIG_ENTITY = 'dnsconfig'
ZONE_DEFAULT_FACET = 'records'
# Master zone fixture: primary key plus widget operations driven by
# UI_driver.basic_crud() ('add' fills the create dialog, 'mod' the
# details page).
ZONE_PKEY = 'foo.itest.'
ZONE_DATA = {
    'pkey': ZONE_PKEY,
    'add': [
        ('textbox', 'idnsname', ZONE_PKEY),
    ],
    'mod': [
        ('checkbox', 'idnsallowsyncptr', 'checked'),
    ],
}
# Forward zone fixture: forwarders are a multivalued widget; policy is a
# radio group on add and a checkbox interaction on modify.
FORWARD_ZONE_PKEY = 'forward.itest.'
FORWARD_ZONE_DATA = {
    'pkey': FORWARD_ZONE_PKEY,
    'add': [
        ('textbox', 'idnsname', FORWARD_ZONE_PKEY),
        ('multivalued', 'idnsforwarders', [
            ('add', '192.168.2.1'),
        ]),
        ('radio', 'idnsforwardpolicy', 'only'),
    ],
    'mod': [
        ('multivalued', 'idnsforwarders', [
            ('add', '192.168.3.1'),
        ]),
        ('checkbox', 'idnsforwardpolicy', 'first'),
    ],
}
# A record inside the master zone: an A record created from the name and
# IP address parts, later modified to a different address.
RECORD_PKEY = 'itest'
A_IP = '192.168.1.10'
RECORD_ADD_DATA = {
    'pkey': RECORD_PKEY,
    'add': [
        ('textbox', 'idnsname', RECORD_PKEY),
        ('textbox', 'a_part_ip_address', A_IP),
    ]
}
RECORD_MOD_DATA = {
    'fields': [
        ('textbox', 'a_part_ip_address', '192.168.1.11'),
    ]
}
# Global DNS configuration: only a single checkbox is toggled.
CONFIG_MOD_DATA = {
    'mod': [
        ('checkbox', 'idnsallowsyncptr', 'checked'),
    ],
}
@pytest.mark.tier1
class test_dns(UI_driver):
    """Selenium-driven web UI tests covering DNS zones, records, forward
    zones and the global DNS configuration pages."""
    def setup(self, *args, **kwargs):
        # Skip the whole class when the tested server has no DNS support.
        super(test_dns, self).setup(*args, **kwargs)
        if not self.has_dns():
            self.skip('DNS not configured')
    @screenshot
    def test_zone_record_crud(self):
        """
        Basic CRUD: dns
        """
        self.init_app()
        # add and mod zone
        self.basic_crud(ZONE_ENTITY, ZONE_DATA,
                        default_facet=ZONE_DEFAULT_FACET, delete=False)
        # add and mod record
        self.navigate_to_record(ZONE_PKEY)
        self.add_record(ZONE_ENTITY, RECORD_ADD_DATA,
                        facet=ZONE_DEFAULT_FACET, navigate=False)
        self.navigate_to_record(RECORD_PKEY)
        self.add_table_record('arecord', RECORD_MOD_DATA)
        # del record, del zone
        self.navigate_by_breadcrumb(ZONE_PKEY)
        self.delete_record(RECORD_PKEY)
        self.navigate_by_breadcrumb("DNS Zones")
        self.delete_record(ZONE_PKEY)
    @screenshot
    def test_forward_zone(self):
        """
        Forward DNS zones
        """
        self.init_app()
        # add and mod zone
        self.basic_crud(FORWARD_ZONE_ENTITY, FORWARD_ZONE_DATA, delete=False)
        # enable/disable
        self.navigate_to_record(FORWARD_ZONE_PKEY)
        self.disable_action()
        self.enable_action()
        # managed-permission round trip on the zone's action list
        self.action_list_action('add_permission')
        self.action_list_action('remove_permission')
        # del zone
        self.navigate_by_breadcrumb("DNS Forward Zones")
        self.delete_record(FORWARD_ZONE_PKEY)
    @screenshot
    def test_last_entry_deletion(self):
        """
        Test last entry deletion
        """
        self.init_app()
        self.add_record(ZONE_ENTITY, ZONE_DATA)
        self.navigate_to_record(ZONE_PKEY)
        self.add_record(ZONE_ENTITY, RECORD_ADD_DATA,
                        facet=ZONE_DEFAULT_FACET)
        self.navigate_to_record(RECORD_PKEY)
        # Deleting the only A record should pop a confirmation dialog and
        # then navigate back to the zone's records facet.
        self.delete_record(A_IP, parent=self.get_facet(), table_name='arecord')
        self.assert_dialog('message_dialog')
        self.dialog_button_click('ok')
        self.wait_for_request(n=2)
        self.assert_facet(ZONE_ENTITY, ZONE_DEFAULT_FACET)
        # cleanup
        self.navigate_by_breadcrumb("DNS Zones")
        self.delete_record(ZONE_PKEY)
    @screenshot
    def test_config_crud(self):
        """
        Basic CRUD: dnsconfig
        """
        self.init_app()
        # dnsconfig is a singleton page: only modify is exercised.
        self.navigate_by_menu('network_services/dns/dnsconfig')
        self.mod_record(CONFIG_ENTITY, CONFIG_MOD_DATA)
| gpl-3.0 |
pbrady/sympy | sympy/core/compatibility.py | 42 | 30993 | """
Reimplementations of constructs introduced in later versions of Python than
we support. Also some functions that are needed SymPy-wide and are located
here for easy import.
"""
from __future__ import print_function, division
import operator
from collections import defaultdict
from sympy.external import import_module
"""
Python 2 and Python 3 compatible imports
String and Unicode compatible changes:
* `unicode()` removed in Python 3, import `unicode` for Python 2/3
compatible function
* `unichr()` removed in Python 3, import `unichr` for Python 2/3 compatible
function
* Use `u()` for escaped unicode sequences (e.g. u'\u2020' -> u('\u2020'))
* Use `u_decode()` to decode utf-8 formatted unicode strings
* `string_types` gives str in Python 3, unicode and str in Python 2,
equivalent to basestring
Integer related changes:
* `long()` removed in Python 3, import `long` for Python 2/3 compatible
function
* `integer_types` gives int in Python 3, int and long in Python 2
Types related changes:
* `class_types` gives type in Python 3, type and ClassType in Python 2
Renamed function attributes:
* Python 2 `.func_code`, Python 3 `.__func__`, access with
`get_function_code()`
* Python 2 `.func_globals`, Python 3 `.__globals__`, access with
`get_function_globals()`
* Python 2 `.func_name`, Python 3 `.__name__`, access with
`get_function_name()`
Moved modules:
* `reduce()`
* `StringIO()`
* `cStringIO()` (same as `StingIO()` in Python 3)
* Python 2 `__builtins__`, access with Python 3 name, `builtins`
Iterator/list changes:
* `xrange` removed in Python 3, import `xrange` for Python 2/3 compatible
iterator version of range
exec:
* Use `exec_()`, with parameters `exec_(code, globs=None, locs=None)`
Metaclasses:
* Use `with_metaclass()`, examples below
* Define class `Foo` with metaclass `Meta`, and no parent:
class Foo(with_metaclass(Meta)):
pass
* Define class `Foo` with metaclass `Meta` and parent class `Bar`:
class Foo(with_metaclass(Meta, Bar)):
pass
"""
import sys
# True on Python 3.x; selects which set of compatibility shims is defined.
PY3 = sys.version_info[0] > 2
if PY3:
    # On Python 3 most of the py2 names are simple aliases.
    class_types = type,
    integer_types = (int,)
    string_types = (str,)
    long = int
    # String / unicode compatibility
    unicode = str
    unichr = chr
    def u(x):
        # Identity: py3 string literals are already unicode.
        return x
    def u_decode(x):
        # Identity: py3 strings need no utf-8 decoding step.
        return x
    Iterator = object
    # Moved definitions
    get_function_code = operator.attrgetter("__code__")
    get_function_globals = operator.attrgetter("__globals__")
    get_function_name = operator.attrgetter("__name__")
    import builtins
    from functools import reduce
    from io import StringIO
    cStringIO = StringIO
    exec_=getattr(builtins, "exec")
    range=range
else:
    import codecs
    import types
    class_types = (type, types.ClassType)
    integer_types = (int, long)
    string_types = (str, unicode)
    long = long
    # String / unicode compatibility
    unicode = unicode
    unichr = unichr
    def u(x):
        # Interpret escaped unicode sequences (e.g. '\u2020') on py2.
        return codecs.unicode_escape_decode(x)[0]
    def u_decode(x):
        return x.decode('utf-8')
    class Iterator(object):
        # Mixin giving py2-style .next() delegating to py3-style __next__.
        def next(self):
            return type(self).__next__(self)
    # Moved definitions
    get_function_code = operator.attrgetter("func_code")
    get_function_globals = operator.attrgetter("func_globals")
    get_function_name = operator.attrgetter("func_name")
    import __builtin__ as builtins
    reduce = reduce
    from StringIO import StringIO
    from cStringIO import StringIO as cStringIO
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        # The py2 exec statement is hidden inside a string so this file
        # still parses on py3.
        exec("exec _code_ in _globs_, _locs_")
    range=xrange
def with_metaclass(meta, *bases):
    """Return a dummy base class that applies metaclass ``meta``.

    Writing::

        class MyClass(with_metaclass(Meta, Base)):
            pass

    is equivalent to the Python 3 form ``class MyClass(Base, metaclass=Meta)``
    and the Python 2 form using ``__metaclass__ = Meta``.  If no base classes
    are given the created class derives from ``object`` only.
    """
    # A throwaway metaclass used for exactly one class creation: when the
    # caller's class statement executes, __new__ discards the temporary
    # "NewBase" parent and builds the real class with *meta* and *bases*.
    # (Technique borrowed from the 'six' library.)
    class _Bootstrap(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(_Bootstrap, "NewBase", (), {})
# These are in here because telling if something is an iterable just by calling
# hasattr(obj, "__iter__") behaves differently in Python 2 and Python 3. In
# particular, hasattr(str, "__iter__") is False in Python 2 and True in Python 3.
# I think putting them here also makes it easier to use them in the core.
class NotIterable:
    """
    Mixin for classes that should *not* be reported as iterable by
    ``iterable()`` even when instances technically support iteration.
    This avoids e.g. an infinite loop when ``list()`` is called on an
    instance of a lazily self-generating object.
    """
    pass
def iterable(i, exclude=(string_types, dict, NotIterable)):
    """
    Return True if ``i`` is SymPy-iterable: ``iter(i)`` succeeds and ``i``
    is not an instance of any type listed in ``exclude``.

    A True result also indicates the iterator is finite, i.e. that
    ``list(i)`` is safe to call.  Strings and mappings are excluded by
    default because SymPy almost never wants to iterate over them; pass
    ``exclude=None`` for the pure-Python notion of iterability, or a tuple
    of types to exclude several.

    See also: is_sequence
    """
    try:
        iter(i)
    except TypeError:
        return False
    # An empty/None exclude disables the type filter entirely.
    if exclude and isinstance(i, exclude):
        return False
    return True
def is_sequence(i, include=None):
    """
    Return True if ``i`` is a sequence in the SymPy sense: it is iterable
    *and* supports indexing via ``__getitem__``.

    Generators are deliberately not sequences (they are exhausted by
    iteration), and strings are excluded by ``iterable()``'s defaults.
    Pass ``include`` (a type or tuple of types) to additionally accept
    objects of those types, e.g. ``include=str`` or
    ``include=(str, GeneratorType)``.

    See also: iterable
    """
    if hasattr(i, '__getitem__') and iterable(i):
        return True
    # Fall back to the caller-supplied whitelist of extra types.
    return bool(include) and isinstance(i, include)
# Backports for interpreters older than the stdlib versions that first
# shipped these helpers.
try:
    from functools import cmp_to_key
except ImportError: # <= Python 2.6
    def cmp_to_key(mycmp):
        """
        Convert a cmp= function into a key= function
        """
        # K wraps each element; rich comparisons delegate to mycmp so that
        # sorted()/min()/max() can order the wrappers.
        class K(object):
            def __init__(self, obj, *args):
                self.obj = obj
            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0
            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0
            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0
            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0
            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0
            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0
        return K
try:
    from itertools import zip_longest
except ImportError: # <= Python 2.7
    from itertools import izip_longest as zip_longest
try:
    from itertools import combinations_with_replacement
except ImportError: # <= Python 2.6
    def combinations_with_replacement(iterable, r):
        """Return r length subsequences of elements from the input iterable
        allowing individual elements to be repeated more than once.
        Combinations are emitted in lexicographic sort order. So, if the
        input iterable is sorted, the combination tuples will be produced
        in sorted order.
        Elements are treated as unique based on their position, not on their
        value. So if the input elements are unique, the generated combinations
        will also be unique.
        See also: combinations
        Examples
        ========
        >>> from sympy.core.compatibility import combinations_with_replacement
        >>> list(combinations_with_replacement('AB', 2))
        [('A', 'A'), ('A', 'B'), ('B', 'B')]
        """
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        # indices is a non-decreasing vector of positions into pool.
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be advanced.
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            # Advance it and reset everything to its right to the same value.
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
def as_int(n):
    """
    Convert the argument to a builtin ``int``.

    The return value is guaranteed to compare equal to the input;
    ``ValueError`` is raised when the input has a non-integral value
    (e.g. ``3.5`` or an irrational SymPy expression).

    ``as_int(3.0)`` returns ``3``; ``as_int(3.5)`` raises ``ValueError``.
    """
    try:
        value = int(n)
        if value == n:
            return value
        # Truncation lost information -> funnel into the ValueError below.
        raise TypeError
    except TypeError:
        raise ValueError('%s is not an integer' % n)
def default_sort_key(item, order=None):
    """Return a key that can be used for sorting.
    The key has the structure:
    (class_key, (len(args), args), exponent.sort_key(), coefficient)
    This key is supplied by the sort_key routine of Basic objects when
    ``item`` is a Basic object or an object (other than a string) that
    sympifies to a Basic object. Otherwise, this function produces the
    key.
    The ``order`` argument is passed along to the sort_key routine and is
    used to determine how the terms *within* an expression are ordered.
    (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex',
    and reversed values of the same (e.g. 'rev-lex'). The default order
    value is None (which translates to 'lex').
    Examples
    ========
    >>> from sympy import S, I, default_sort_key, sin, cos, sqrt
    >>> from sympy.core.function import UndefinedFunction
    >>> from sympy.abc import x
    The following are equivalent ways of getting the key for an object:
    >>> x.sort_key() == default_sort_key(x)
    True
    Here are some examples of the key that is produced:
    >>> default_sort_key(UndefinedFunction('f'))
    ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'),
    (0, ()), (), 1), 1)
    >>> default_sort_key('1')
    ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)
    >>> default_sort_key(S.One)
    ((1, 0, 'Number'), (0, ()), (), 1)
    >>> default_sort_key(2)
    ((1, 0, 'Number'), (0, ()), (), 2)
    While sort_key is a method only defined for SymPy objects,
    default_sort_key will accept anything as an argument so it is
    more robust as a sorting key. For the following, using key=
    lambda i: i.sort_key() would fail because 2 doesn't have a sort_key
    method; that's why default_sort_key is used. Note, that it also
    handles sympification of non-string items likes ints:
    >>> a = [2, I, -I]
    >>> sorted(a, key=default_sort_key)
    [2, -I, I]
    The returned key can be used anywhere that a key can be specified for
    a function, e.g. sort, min, max, etc...:
    >>> a.sort(key=default_sort_key); a[0]
    2
    >>> min(a, key=default_sort_key)
    2
    Note
    ----
    The key returned is useful for getting items into a canonical order
    that will be the same across platforms. It is not directly useful for
    sorting lists of expressions:
    >>> a, b = x, 1/x
    Since ``a`` has only 1 term, its value of sort_key is unaffected by
    ``order``:
    >>> a.sort_key() == a.sort_key('rev-lex')
    True
    If ``a`` and ``b`` are combined then the key will differ because there
    are terms that can be ordered:
    >>> eq = a + b
    >>> eq.sort_key() == eq.sort_key('rev-lex')
    False
    >>> eq.as_ordered_terms()
    [x, 1/x]
    >>> eq.as_ordered_terms('rev-lex')
    [1/x, x]
    But since the keys for each of these terms are independent of ``order``'s
    value, they don't sort differently when they appear separately in a list:
    >>> sorted(eq.args, key=default_sort_key)
    [1/x, x]
    >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex'))
    [1/x, x]
    The order of terms obtained when using these keys is the order that would
    be obtained if those terms were *factors* in a product.
    Although it is useful for quickly putting expressions in canonical order,
    it does not sort expressions based on their complexity defined by the
    number of operations, power of variables and others:
    >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key)
    [sin(x)*cos(x), sin(x)]
    >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key)
    [sqrt(x), x, x**2, x**3]
    See Also
    ========
    ordered, sympy.core.expr.as_ordered_factors, sympy.core.expr.as_ordered_terms
    """
    # Imports are local to avoid circular imports at module load time.
    from sympy.core import S, Basic
    from sympy.core.sympify import sympify, SympifyError
    from sympy.core.compatibility import iterable
    # Basic objects supply their own canonical key.
    if isinstance(item, Basic):
        return item.sort_key(order=order)
    if iterable(item, exclude=string_types):
        # Containers: key each element recursively; unordered containers
        # (dict, set) are sorted so the key is deterministic.
        if isinstance(item, dict):
            args = item.items()
            unordered = True
        elif isinstance(item, set):
            args = item
            unordered = True
        else:
            # e.g. tuple, list
            args = list(item)
            unordered = False
        args = [default_sort_key(arg, order=order) for arg in args]
        if unordered:
            # e.g. dict, set
            args = sorted(args)
        # cls_index 10 places containers after Basic (1) and plain (0) keys.
        cls_index, args = 10, (len(args), tuple(args))
    else:
        if not isinstance(item, string_types):
            # Try to promote non-strings (e.g. ints) to Basic first.
            try:
                item = sympify(item)
            except SympifyError:
                # e.g. lambda x: x
                pass
            else:
                if isinstance(item, Basic):
                    # e.g int -> Integer
                    return default_sort_key(item)
                # e.g. UndefinedFunction
        # e.g. str
        cls_index, args = 0, (1, (str(item),))
    # Shape mirrors Basic.sort_key(): unit exponent and coefficient.
    return (cls_index, 0, item.__class__.__name__
            ), args, S.One.sort_key(), S.One
def _nodes(e):
    """
    Helper for ordered(): a rough size measure of ``e``.

    For Basic objects this is the number of Basic nodes in the expression
    tree; for iterables/dicts it is one plus the summed size of the
    contents; anything else counts as 1.
    """
    from .basic import Basic
    if isinstance(e, Basic):
        return e.count(Basic)
    # dict is excluded from iterable()'s defaults, so checking it first
    # does not change which branch a dict takes.
    if isinstance(e, dict):
        return 1 + sum(_nodes(k) + _nodes(v) for k, v in e.items())
    if iterable(e):
        return 1 + sum(_nodes(ei) for ei in e)
    return 1
def ordered(seq, keys=None, default=True, warn=False):
    """Return an iterator of the seq where keys are used to break ties in
    a conservative fashion: if, after applying a key, there are no ties
    then no other keys will be computed.
    Two default keys will be applied if 1) keys are not provided or 2) the
    given keys don't resolve all ties (but only if `default` is True). The
    two keys are `_nodes` (which places smaller expressions before large) and
    `default_sort_key` which (if the `sort_key` for an object is defined
    properly) should resolve any ties.
    If ``warn`` is True then an error will be raised if there were no
    keys remaining to break ties. This can be used if it was expected that
    there should be no ties between items that are not identical.
    Examples
    ========
    >>> from sympy.utilities.iterables import ordered
    >>> from sympy import count_ops
    >>> from sympy.abc import x, y
    The count_ops is not sufficient to break ties in this list and the first
    two items appear in their original order (i.e. the sorting is stable):
    >>> list(ordered([y + 2, x + 2, x**2 + y + 3],
    ...    count_ops, default=False, warn=False))
    ...
    [y + 2, x + 2, x**2 + y + 3]
    The default_sort_key allows the tie to be broken:
    >>> list(ordered([y + 2, x + 2, x**2 + y + 3]))
    ...
    [x + 2, y + 2, x**2 + y + 3]
    Here, sequences are sorted by length, then sum:
    >>> seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]], [
    ...    lambda x: len(x),
    ...    lambda x: sum(x)]]
    ...
    >>> list(ordered(seq, keys, default=False, warn=False))
    [[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
    If ``warn`` is True, an error will be raised if there were not
    enough keys to break ties:
    >>> list(ordered(seq, keys, default=False, warn=True))
    Traceback (most recent call last):
    ...
    ValueError: not enough keys to break ties
    Notes
    =====
    The decorated sort is one of the fastest ways to sort a sequence for
    which special item comparison is desired: the sequence is decorated,
    sorted on the basis of the decoration (e.g. making all letters lower
    case) and then undecorated. If one wants to break ties for items that
    have the same decorated value, a second key can be used. But if the
    second key is expensive to compute then it is inefficient to decorate
    all items with both keys: only those items having identical first key
    values need to be decorated. This function applies keys successively
    only when needed to break ties. By yielding an iterator, use of the
    tie-breaker is delayed as long as possible.
    This function is best used in cases when use of the first key is
    expected to be a good hashing function; if there are no unique hashes
    from application of a key then that key should not have been used. The
    exception, however, is that even if there are many collisions, if the
    first group is small and one does not need to process all items in the
    list then time will not be wasted sorting what one was not interested
    in. For example, if one were looking for the minimum in a list and
    there were several criteria used to define the sort order, then this
    function would be good at returning that quickly if the first group
    of candidates is small relative to the number of items being processed.
    """
    # Bucket items by the value of the first key (or all in one bucket if
    # no keys were given and the defaults are to be used).
    d = defaultdict(list)
    if keys:
        if not isinstance(keys, (list, tuple)):
            keys = [keys]
        keys = list(keys)
        f = keys.pop(0)
        for a in seq:
            d[f(a)].append(a)
    else:
        if not default:
            raise ValueError('if default=False then keys must be provided')
        d[None].extend(seq)
    # Emit buckets in key order; only buckets with more than one member
    # are recursively re-ordered with the remaining (or default) keys.
    for k in sorted(d.keys()):
        if len(d[k]) > 1:
            if keys:
                d[k] = ordered(d[k], keys, default, warn)
            elif default:
                d[k] = ordered(d[k], (_nodes, default_sort_key,),
                               default=False, warn=warn)
            elif warn:
                from sympy.utilities.iterables import uniq
                u = list(uniq(d[k]))
                if len(u) > 1:
                    raise ValueError(
                        'not enough keys to break ties: %s' % u)
        for v in d[k]:
            yield v
        # Drop emitted buckets to free memory as early as possible.
        d.pop(k)
# If HAS_GMPY is 0, no supported version of gmpy is available. Otherwise,
# HAS_GMPY contains the major version number of gmpy; i.e. 1 for gmpy, and
# 2 for gmpy2.
# Versions of gmpy prior to 1.03 do not work correctly with int(largempz)
# For example, int(gmpy.mpz(2**256)) would raise OverflowError.
# See issue 4980.
# Minimum version of gmpy changed to 1.13 to allow a single code base to also
# work with gmpy2.
def _getenv(key, default=None):
from os import getenv
return getenv(key, default)
# Select the integer "ground types" backend: 'python', 'gmpy' or 'auto'
# (resolved below).  HAS_GMPY records which gmpy generation was found
# (0 = none, 1 = gmpy, 2 = gmpy2).
GROUND_TYPES = _getenv('SYMPY_GROUND_TYPES', 'auto').lower()
HAS_GMPY = 0
if GROUND_TYPES != 'python':
    # Don't try to import gmpy2 if ground types is set to gmpy1. This is
    # primarily intended for testing.
    if GROUND_TYPES != 'gmpy1':
        gmpy = import_module('gmpy2', min_module_version='2.0.0',
            module_version_attr='version', module_version_attr_call_args=())
        if gmpy:
            HAS_GMPY = 2
    else:
        # 'gmpy1' was only a testing switch; normalize back to 'gmpy'.
        GROUND_TYPES = 'gmpy'
    if not HAS_GMPY:
        # Fall back to the original gmpy package (>= 1.13 for int() fix).
        gmpy = import_module('gmpy', min_module_version='1.13',
            module_version_attr='version', module_version_attr_call_args=())
        if gmpy:
            HAS_GMPY = 1
if GROUND_TYPES == 'auto':
    if HAS_GMPY:
        GROUND_TYPES = 'gmpy'
    else:
        GROUND_TYPES = 'python'
if GROUND_TYPES == 'gmpy' and not HAS_GMPY:
    # Requested gmpy explicitly but it is not importable: warn and degrade.
    from warnings import warn
    warn("gmpy library is not installed, switching to 'python' ground types")
    GROUND_TYPES = 'python'
# SYMPY_INTS is a tuple containing the base types for valid integer types.
SYMPY_INTS = integer_types
if GROUND_TYPES == 'gmpy':
    SYMPY_INTS += (type(gmpy.mpz(0)),)
# check_output() is new in Python 2.7
import os
try:
    try:
        from subprocess import check_output
    except ImportError: # <= Python 2.6
        # Approximate check_output() with check_call(): same success/raise
        # behavior, but the captured output is unavailable.
        from subprocess import CalledProcessError, check_call
        def check_output(*args, **kwargs):
            # Discard stdout; only the exit status matters here.
            with open(os.devnull, 'w') as fh:
                kwargs['stdout'] = fh
                try:
                    return check_call(*args, **kwargs)
                except CalledProcessError as e:
                    e.output = ("program output is not available for Python 2.6.x")
                    raise e
except ImportError:
    # running on platform like App Engine, no subprocess at all
    pass
# lru_cache compatible with py2.6->py3.2 copied directly from
# http://code.activestate.com/
# recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
# Statistics record returned by the lru_cache wrapper's cache_info().
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
    """List subclass that computes its hash once at construction time,
    so repeated cache lookups don't re-hash the key tuple."""
    __slots__ = 'hashvalue'
    def __init__(self, tup, hash=hash):
        self[:] = tup
        self.hashvalue = hash(tup)
    def __hash__(self):
        return self.hashvalue
def _make_key(args, kwds, typed,
              kwd_mark = (object(),),
              fasttypes = set((int, str, frozenset, type(None))),
              sorted=sorted, tuple=tuple, type=type, len=len):
    'Make a cache key from optionally typed positional and keyword arguments'
    # The default arguments act as closure constants: kwd_mark separates
    # positional from keyword parts, and the builtins are bound as locals
    # for speed.
    key = args
    if kwds:
        # Keyword args are sorted so equivalent call orders hash the same.
        sorted_items = sorted(kwds.items())
        key += kwd_mark
        for item in sorted_items:
            key += item
    if typed:
        # Append argument types so f(3) and f(3.0) get distinct entries.
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # Single hashable builtin argument: use it directly as the key.
        return key[0]
    return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
    """Least-recently-used cache decorator.
    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.
    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.
    Arguments to the cached function must be hashable.
    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.
    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).
    def decorating_function(user_function):
        cache = dict()
        stats = [0, 0] # make statistics updateable non-locally
        HITS, MISSES = 0, 1 # names for the stats fields
        make_key = _make_key
        cache_get = cache.get # bound method to lookup key or return None
        _len = len # localize the global len() function
        lock = RLock() # because linkedlist updates aren't threadsafe
        root = [] # root of the circular doubly linked list
        root[:] = [root, root, None, None] # initialize by pointing to self
        nonlocal_root = [root] # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
        # Three wrapper variants depending on maxsize: no caching at all,
        # unbounded dict caching, or full LRU with the linked list.
        if maxsize == 0:
            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result
        elif maxsize is None:
            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(key, root) # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result
        else:
            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                try:
                    key = make_key(args, kwds, typed) if kwds or typed else args
                except TypeError:
                    # unhashable arguments: bypass the cache entirely
                    stats[MISSES] += 1
                    return user_function(*args, **kwds)
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # The user function runs outside the lock so the cache stays
                # usable (and reentrant) during a slow computation.
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released. since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        oldvalue = root[RESULT]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                    stats[MISSES] += 1
                return result
        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]
        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)
    return decorating_function
# Prefer the stdlib implementation (shadowing the backport above) when its
# API matches ours.
if sys.version_info[:2] >= (3, 3):
    # 3.2 has an lru_cache with an incompatible API
    from functools import lru_cache
| bsd-3-clause |
mglukhikh/intellij-community | python/lib/Lib/site-packages/django/contrib/admin/views/decorators.py | 78 | 1458 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django import template
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def staff_member_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, displaying the login page if necessary.
    """
    def _checklogin(request, *args, **kwargs):
        if request.user.is_active and request.user.is_staff:
            # The user is valid. Continue to the admin page.
            return view_func(request, *args, **kwargs)
        # Not an active staff user: show the admin login form, then return
        # to the originally requested URL after a successful login.
        assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
        defaults = {
            'template_name': 'admin/login.html',
            'authentication_form': AdminAuthenticationForm,
            'extra_context': {
                'title': _('Log in'),
                'app_path': request.get_full_path(),
                REDIRECT_FIELD_NAME: request.get_full_path(),
            },
        }
        return login(request, **defaults)
    # Preserve the wrapped view's metadata for introspection/debugging.
    return wraps(view_func)(_checklogin)
| apache-2.0 |
solomonvimal/pyras | example.py | 1 | 5284 | # -*- coding: utf-8 -*-
"""
"""
from pyras.controllers.hecras import HECRASController, kill_all
project = r'examples\Steady Examples\BEAVCREK.prj'
#project = r'examples\Unsteady Examples\NavigationDam\ROCK_TEST.prj'
#rc = HECRASController('RAS41')
rc = HECRASController('RAS500')
#rc.ShowRas()
# %% Project
rc.Project_Open(project)
res = rc.Project_Current()
print('Project_Current:')
print(res)
print('')
#rc.Compute_HideComputationWindow()
#rc.Compute_ShowComputationWindow()
res = rc.Compute_CurrentPlan()
print('Compute_CurrentPlan:')
print(res)
print('')
#res = rc.Compute_Cancel()
#print('\nCompute_Cancel', res)
#res = rc.Compute_Complete()
#print('Compute_Complete')
#print(res)
#print('')
# %% Curent (Controller Class)
#res = rc.CurrentGeomFile()
#print('CurrentGeomFile')
#print(res)
#print('')
#
#res = rc.CurrentPlanFile()
#print('CurrentPlanFile')
#print(res)
#print('')
#
#res = rc.CurrentProjectFile()
#print('CurrentProjectFile')
#print(res)
#print('')
#
#res = rc.CurrentProjectTitle()
#print('CurrentProjectTitle')
#print(res)
#print('')
#
#res = rc.CurrentSteadyFile()
#print('CurrentSteadyFile')
#print(res)
#print('')
#
#res = rc.CurrentUnSteadyFile()
#print('CurrentUnSteadyFile')
#print(res)
#print('')
# %% Geometry (Geometry Class)
#geo = rc.Geometry()
#
#res = geo.RiverIndex('Beaver Creek')
#print('RiverIndex')
#print(res)
#print('')
#
#res = geo.RiverName(1)
#print('RiverName')
#print(res)
#print('')
#
#res = geo.ReachName(1, 1)
#print('ReachName')
#print(res)
#print('')
#
#res = geo.ReachInvert_nPoints(1, 1)
#print('ReachInvert_nPoints')
#print(res)
#print('')
#
#res = geo.ReachInvert_Points(1, 1)
#print('ReachInvert_Points')
#print(res)
#print('')
#
#res = geo.ReachIndex(1, 'Kentwood')
#print('ReachIndex')
#print(res)
#print('')
#
#res = geo.nRiver()
#print('nRiver')
#print(res)
#print('')
#
#res = geo.nReach(1)
#print('nReach')
#print(res)
#print('')
#
#res = geo.NodeType(1, 1, 1)
#print('NodeType')
#print(res)
#print('')
#
#res = geo.NodeRS(1, 1, 1)
#print('NodeRS')
#print(res)
#print('')
#
#res = geo.NodeIndex(1, 1, '5.99')
#print('NodeIndex')
#print(res)
#print('')
#
#res = geo.NodeCutLine_Points(1, 1, 1)
#print('NodeCutLine_Points')
#print(res)
#print('')
#
#res = geo.NodeCutLine_nPoints(1, 1, 1)
#print('NodeCutLine_nPoints')
#print(res)
#print('')
#
#res = geo.NodeCType(1, 1, 8)
#print('NodeCType')
#print(res)
#print('')
# %% Edit (Controller Class)
#rc.Edit_BC('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_BC')
#print('')
#
#rc.Edit_GeometricData()
#print('Edit_GeometricData')
#print('')
#
#rc.Edit_IW('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_IW')
#print('')
#
#rc.Edit_LW('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_LW')
#print('')
#
#rc.Edit_MultipleRun()
#print('Edit_MultipleRun')
#print('')
#
#rc.Edit_PlanData()
#print('Edit_PlanData')
#print('')
#
#rc.Edit_QuasiUnsteadyFlowData()
#print('Edit_QuasiUnsteadyFlowData')
#print('')
#
#rc.Edit_SedimentData()
#print('Edit_SedimentData')
#print('')
#
#rc.Edit_SteadyFlowData()
#print('Edit_SteadyFlowData')
#print('')
#
#rc.Edit_UnsteadyFlowData()
#print('Edit_UnsteadyFlowData')
#print('')
#
#rc.Edit_WaterQualityData()
#print('Edit_WaterQualityData')
#print('')
#
#rc.Edit_XS('Beaver Creek', 'Kentwood', '5.99')
#print('Edit_XS')
#print('')
# %% Geometry (Controller Class)
# Not tested
#res = rc.Geometery_GISImport(self, title, Filename)
#print('Geometery_GISImport')
#print(res)
#print('')
# Not tested but seems to work
#res = rc.Geometry_GetGateNames(1, 1, '5.39')
#print('Geometry_GetGateNames')
#print(res)
#print('')
# Not working
#res = rc.Geometry_GetGML('Bvr.Cr.+Bridge - P/W: New Le, Lc')
#print('Geometry_GetGML')
#print(res)
#print('')
#res = rc.Geometry_GetNode(1, 1, '5.39')
#print('Geometry_GetNode')
#print(res)
#print('')
#
#res = rc.Geometry_GetNodes(1, 1)
#print('Geometry_GetNodes')
#print(res)
#print('')
#
#res = rc.Geometry_GetReaches(1)
#print('Geometry_GetReaches')
#print(res)
#print('')
#
#res = rc.Geometry_GetRivers()
#print('Geometry_GetRivers')
#print(res)
#print('')
#
#res = rc.Geometry_SetMann('Beaver Creek', 'Kentwood', '5.99',
# 3, (0.12, 0.13, 0.14), (5, 36, 131))
#print('Geometry_SetMann')
#print(res)
#print('')
#
#res = rc.Geometry_SetMann_LChR('Beaver Creek', 'Kentwood', '5.99', 0.15, 0.10,
# 0.16)
#print('Geometry_SetMann_LChR')
#print(res)
#print('')
#
#res = rc.Geometry_SetSAArea('test', 1200)
#print('Geometry_SetSAArea')
#print(res)
#print('')
# %% Get (Controller Class)
# Version string of the running HEC-RAS instance (result intentionally
# not printed here; see the commented-out lines below).
res = rc.GetRASVersion()
#print('GetRASVersion')
#print(res)
#print('')
#
#res = rc.HECRASVersion()
#print('HECRASVersion', res)
#print(res)
#print('')
# %% Schematic (Controller Class)
#res = rc.Schematic_ReachCount()
#print('Schematic_ReachCount')
#print(res)
#print('')
#
#res = rc.Schematic_ReachPointCount()
#print('Schematic_ReachPointCount')
#print(res)
#print('')
#
#res = rc.Schematic_ReachPoints()
#print('Schematic_ReachPoints')
#print(res)
#print('')
#
#res = rc.Schematic_XSCount()
#print('Schematic_XSCount')
#print(res)
#print('')
#
#res = rc.Schematic_XSPointCount()
#print('Schematic_XSPointCount')
#print(res)
#print('')
#
#res = rc.Schematic_XSPoints()
#print('Schematic_XSPointCount')
#print(res)
#print('')
# Shut down the controller and make sure no stray RAS processes linger.
rc.close()
kill_all()
| mit |
TuanjieNew/blast | alignment_py.py | 1 | 3687 | #!/usr/bin/env python
#fn:loc_alig.py
def alignment(seq1, seq2):
    """Score a pairwise alignment of ``seq1`` against ``seq2`` and print it.

    A Smith-Waterman-style score matrix is filled (match=+2, mismatch=-1,
    gap=-1, scores floored at 0), an alignment is traced back from the
    ends of both sequences, printed together with a position ruler, and
    the number of identical aligned columns is returned.

    Parameters
    ----------
    seq1, seq2 : str
        Sequences to align (``seq1`` runs along the matrix columns,
        ``seq2`` along the rows).

    Returns
    -------
    int
        Count of aligned positions where the two sequences agree.

    Notes
    -----
    The first parameter was previously (mis)spelled ``seqq1`` while the
    body read the module-level global ``seq1`` — the function only worked
    when a global of that name happened to exist.  Fixed here.
    """
    match = 2
    mismatch = -1
    gap = -1
    len1 = len(seq1)
    len2 = len(seq2)

    # (len2+1) x (len1+1) score matrix; row 0 and column 0 stay 0 so an
    # alignment may start anywhere (local-alignment floor).
    matrix = [[0] * (len1 + 1) for _ in range(len2 + 1)]

    max_i = 0
    max_j = 0
    max_score = 0
    for i in range(1, len2 + 1):
        for j in range(1, len1 + 1):
            if seq1[j - 1] == seq2[i - 1]:
                diag_score = matrix[i - 1][j - 1] + match
            else:
                diag_score = matrix[i - 1][j - 1] + mismatch
            up_score = matrix[i - 1][j] + gap
            left_score = matrix[i][j - 1] + gap
            # The original nested comparisons always stored the maximum of
            # the three candidate scores; max() is equivalent.
            best = max(diag_score, up_score, left_score)
            if best <= 0:
                continue  # cell keeps its 0: negative runs are cut off
            matrix[i][j] = best
            if best > max_score:
                max_i = i
                max_j = j
                max_score = best

    # NOTE(review): a strict local alignment would trace back from
    # (max_i, max_j); this implementation deliberately keeps the original
    # behaviour of starting from the ends of both sequences.
    align1 = ''
    align2 = ''
    j = len1
    i = len2
    while True:
        if matrix[i][j] == 0:
            break
        if matrix[i - 1][j - 1] >= matrix[i - 1][j]:
            if matrix[i - 1][j - 1] >= matrix[i][j - 1]:
                # diagonal move: consume one character from each sequence
                align1 += seq1[j - 1]
                align2 += seq2[i - 1]
                j -= 1
                i -= 1
            else:
                # left move: gap in seq2
                align1 += seq1[j - 1]
                align2 += '-'
                j -= 1
        else:
            if matrix[i - 1][j] >= matrix[i][j - 1]:
                # up move: gap in seq1
                align1 += '-'
                align2 += seq2[i - 1]
                i -= 1
            else:
                # left move: gap in seq2
                align1 += seq1[j - 1]
                align2 += '-'
                j -= 1
    align1 = align1[::-1]
    align2 = align2[::-1]

    # Position ruler: every multiple of 5 is labelled, padded so each
    # label occupies a 5-character field.  The original tested the
    # 2-digit case (i >= 9) before the 3-digit one (i > 99), making the
    # latter unreachable; order fixed here.
    ali_str = '1---'
    for k in range(len(align1)):
        pos = k + 1
        if pos % 5 == 0:
            if pos >= 100:
                ali_str = ali_str + str(pos) + '--'
            elif pos >= 10:
                ali_str = ali_str + str(pos) + '---'
            else:
                ali_str = ali_str + str(pos) + '----'
    print('\n')
    print('\033[1;31;40m' + 'loca: ' + '\033[0m' + ali_str[:len(align1)])
    print('\033[1;31;40m' + 'seq1: ' + '\033[0m' + align1)
    print('\033[1;31;40m' + 'seq2: ' + '\033[0m' + align2)

    # Count identical aligned columns (align1 and align2 grow in lockstep,
    # so they always have equal length).
    equ_num = sum(1 for a, b in zip(align1, align2) if a == b)
    return equ_num
# Demo: two preproinsulin-like 60-residue sequences aligned against
# each other; prints the alignment and the number of matching columns.
seq1='MALWMRLLPLLALLALWGPDPAAAFVNQHLCGSHLVEALYLVCGERGFFYTPKTRREAED'
seq2='MALWMRFLPLLALLVVWEPKPAQAFVKQHLCGPHLVEALYLVCGERGFFYTPKSRREVED'
eq_num=alignment(seq1,seq2)
print('\033[1;31;40m'+'eq_num: '+'\033[0m'+str(eq_num)+'\n')
| gpl-2.0 |
vyscond/cocos | test/test_label_changing.py | 6 | 1555 | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# Metadata consumed by the cocos test harness: scripted interaction
# steps and search tags for this test.
testinfo = "s, t 1.1, s, t 2.1, s, t 3.1, s, q"
tags = "Label, color, text"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.actions import Rotate, Repeat, Delay, CallFunc
from cocos.text import Label
class TestLayer(cocos.layer.Layer):
    """Layer holding one rotating label whose text and color cycle."""

    def __init__(self):
        super(TestLayer, self).__init__()
        width, height = director.get_window_size()
        self.color1 = [255, 0, 0, 255]
        self.color2 = [0, 0, 255, 255]
        # Centered, initially empty label.
        self.label = Label('', (width // 2, height // 2))
        self.label.do(Rotate(360, 10))
        # Every second, step to the next color in the palette, forever.
        cycle = (Delay(1) + CallFunc(self.set_color, 0) +
                 Delay(1) + CallFunc(self.set_color, 1) +
                 Delay(1) + CallFunc(self.set_color, 2))
        self.label.do(Repeat(cycle))
        self.add(self.label)
        self.set_color(2)

    def set_color(self, color_selector):
        """Recolor the label and show the chosen RGBA tuple as its text."""
        palette = ((255, 32, 64, 255), (0, 240, 100, 255), (90, 90, 250, 255))
        rgba = palette[color_selector]
        self.label.element.text = "({0}, {1}, {2}, {3})".format(*rgba)
        self.label.element.color = rgba
def main():
    """Initialize the director and run a scene containing the test layer."""
    director.init()
    layer = TestLayer()
    director.run(cocos.scene.Scene(layer))

if __name__ == '__main__':
    main()
| bsd-3-clause |
pyfisch/servo | tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/h2/settings.py | 37 | 13412 | # -*- coding: utf-8 -*-
"""
h2/settings
~~~~~~~~~~~
This module contains a HTTP/2 settings object. This object provides a simple
API for manipulating HTTP/2 settings, keeping track of both the current active
state of the settings and the unacknowledged future values of the settings.
"""
import collections
import enum

try:  # Python >= 3.3 (the alias in ``collections`` is gone in 3.10+)
    from collections.abc import MutableMapping
except ImportError:  # Python 2.7
    from collections import MutableMapping

from hyperframe.frame import SettingsFrame

from h2.errors import ErrorCodes
from h2.exceptions import InvalidSettingsValueError
class SettingCodes(enum.IntEnum):
    """
    All known HTTP/2 setting codes.

    .. versionadded:: 2.6.0
    """
    #: Allows the sender to inform the remote endpoint of the maximum size of
    #: the header compression table used to decode header blocks, in octets.
    HEADER_TABLE_SIZE = SettingsFrame.HEADER_TABLE_SIZE

    #: This setting can be used to disable server push. To disable server push
    #: on a client, set this to 0.
    ENABLE_PUSH = SettingsFrame.ENABLE_PUSH

    #: Indicates the maximum number of concurrent streams that the sender will
    #: allow.
    MAX_CONCURRENT_STREAMS = SettingsFrame.MAX_CONCURRENT_STREAMS

    #: Indicates the sender's initial window size (in octets) for stream-level
    #: flow control.
    INITIAL_WINDOW_SIZE = SettingsFrame.INITIAL_WINDOW_SIZE

    # hyperframe renamed these constants in 4.0.0; probe for the old name
    # first so both generations of the library are supported.
    try:  # Platform-specific: Hyperframe < 4.0.0
        _max_frame_size = SettingsFrame.SETTINGS_MAX_FRAME_SIZE
    except AttributeError:  # Platform-specific: Hyperframe >= 4.0.0
        _max_frame_size = SettingsFrame.MAX_FRAME_SIZE

    #: Indicates the size of the largest frame payload that the sender is
    #: willing to receive, in octets.
    MAX_FRAME_SIZE = _max_frame_size

    try:  # Platform-specific: Hyperframe < 4.0.0
        _max_header_list_size = SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE
    except AttributeError:  # Platform-specific: Hyperframe >= 4.0.0
        _max_header_list_size = SettingsFrame.MAX_HEADER_LIST_SIZE

    #: This advisory setting informs a peer of the maximum size of header list
    #: that the sender is prepared to accept, in octets. The value is based on
    #: the uncompressed size of header fields, including the length of the name
    #: and value in octets plus an overhead of 32 octets for each header field.
    MAX_HEADER_LIST_SIZE = _max_header_list_size
def _setting_code_from_int(code):
    """
    Map an integer to its :class:`SettingCodes <h2.settings.SettingCodes>`
    member, falling back to the raw integer for unknown codes.
    """
    try:
        known = SettingCodes(code)
    except ValueError:
        return code
    return known
# Aliases for all the settings values, kept for backwards compatibility
# with code written before the SettingCodes enum existed.

#: Allows the sender to inform the remote endpoint of the maximum size of the
#: header compression table used to decode header blocks, in octets.
#:
#: .. deprecated:: 2.6.0
#:    Deprecated in favour of :data:`SettingCodes.HEADER_TABLE_SIZE
#:    <h2.settings.SettingCodes.HEADER_TABLE_SIZE>`.
HEADER_TABLE_SIZE = SettingCodes.HEADER_TABLE_SIZE

#: This setting can be used to disable server push. To disable server push on
#: a client, set this to 0.
#:
#: .. deprecated:: 2.6.0
#:    Deprecated in favour of :data:`SettingCodes.ENABLE_PUSH
#:    <h2.settings.SettingCodes.ENABLE_PUSH>`.
ENABLE_PUSH = SettingCodes.ENABLE_PUSH

#: Indicates the maximum number of concurrent streams that the sender will
#: allow.
#:
#: .. deprecated:: 2.6.0
#:    Deprecated in favour of :data:`SettingCodes.MAX_CONCURRENT_STREAMS
#:    <h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS>`.
MAX_CONCURRENT_STREAMS = SettingCodes.MAX_CONCURRENT_STREAMS

#: Indicates the sender's initial window size (in octets) for stream-level flow
#: control.
#:
#: .. deprecated:: 2.6.0
#:    Deprecated in favour of :data:`SettingCodes.INITIAL_WINDOW_SIZE
#:    <h2.settings.SettingCodes.INITIAL_WINDOW_SIZE>`.
INITIAL_WINDOW_SIZE = SettingCodes.INITIAL_WINDOW_SIZE

#: Indicates the size of the largest frame payload that the sender is willing
#: to receive, in octets.
#:
#: .. deprecated:: 2.6.0
#:    Deprecated in favour of :data:`SettingCodes.MAX_FRAME_SIZE
#:    <h2.settings.SettingCodes.MAX_FRAME_SIZE>`.
MAX_FRAME_SIZE = SettingCodes.MAX_FRAME_SIZE

#: This advisory setting informs a peer of the maximum size of header list that
#: the sender is prepared to accept, in octets. The value is based on the
#: uncompressed size of header fields, including the length of the name and
#: value in octets plus an overhead of 32 octets for each header field.
#:
#: .. deprecated:: 2.6.0
#:    Deprecated in favour of :data:`SettingCodes.MAX_HEADER_LIST_SIZE
#:    <h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE>`.
MAX_HEADER_LIST_SIZE = SettingCodes.MAX_HEADER_LIST_SIZE
class ChangedSetting:
    """Record of one settings change: the code, its old value, its new value."""

    def __init__(self, setting, original_value, new_value):
        #: The setting code given. Either one of :class:`SettingCodes
        #: <h2.settings.SettingCodes>` or ``int``
        #:
        #: .. versionchanged:: 2.6.0
        self.setting = setting
        #: The original value before being changed.
        self.original_value = original_value
        #: The new value after being changed.
        self.new_value = new_value

    def __repr__(self):
        template = (
            "ChangedSetting(setting=%s, original_value=%s, new_value=%s)"
        )
        return template % (self.setting, self.original_value, self.new_value)
class Settings(MutableMapping):
    # NOTE: base class imported via a compat shim at the top of the file;
    # ``collections.MutableMapping`` was removed in Python 3.10.
    """
    An object that encapsulates HTTP/2 settings state.

    HTTP/2 Settings are a complex beast. Each party, remote and local, has its
    own settings and a view of the other party's settings. When a settings
    frame is emitted by a peer it cannot assume that the new settings values
    are in place until the remote peer acknowledges the setting. In principle,
    multiple settings changes can be "in flight" at the same time, all with
    different values.

    This object encapsulates this mess. It provides a dict-like interface to
    settings, which return the *current* values of the settings in question.
    Additionally, it keeps track of the stack of proposed values: each time an
    acknowledgement is sent/received, it updates the current values with the
    stack of proposed values. On top of all that, it validates the values to
    make sure they're allowed, and raises :class:`InvalidSettingsValueError
    <h2.exceptions.InvalidSettingsValueError>` if they are not.

    Finally, this object understands what the default values of the HTTP/2
    settings are, and sets those defaults appropriately.

    .. versionchanged:: 2.2.0
       Added the ``initial_values`` parameter.

    .. versionchanged:: 2.5.0
       Added the ``max_header_list_size`` property.

    :param client: (optional) Whether these settings should be defaulted for a
        client implementation or a server implementation. Defaults to ``True``.
    :type client: ``bool``
    :param initial_values: (optional) Any initial values the user would like
        set, rather than RFC 7540's defaults.
    :type initial_vales: ``MutableMapping``
    """
    def __init__(self, client=True, initial_values=None):
        # Backing object for the settings. This is a dictionary of
        # (setting: [list of values]), where the first value in the list is the
        # current value of the setting. Strictly this doesn't use lists but
        # instead uses collections.deque to avoid repeated memory allocations.
        #
        # This contains the default values for HTTP/2.
        self._settings = {
            SettingCodes.HEADER_TABLE_SIZE: collections.deque([4096]),
            SettingCodes.ENABLE_PUSH: collections.deque([int(client)]),
            SettingCodes.INITIAL_WINDOW_SIZE: collections.deque([65535]),
            SettingCodes.MAX_FRAME_SIZE: collections.deque([16384]),
        }
        if initial_values is not None:
            for key, value in initial_values.items():
                invalid = _validate_setting(key, value)
                if invalid:
                    raise InvalidSettingsValueError(
                        "Setting %d has invalid value %d" % (key, value),
                        error_code=invalid
                    )
                self._settings[key] = collections.deque([value])

    def acknowledge(self):
        """
        The settings have been acknowledged, either by the user (remote
        settings) or by the remote peer (local settings).

        :returns: A dict of {setting: ChangedSetting} that were applied.
        """
        changed_settings = {}

        # If there is more than one setting in the list, we have a setting
        # value outstanding. Update them.
        for k, v in self._settings.items():
            if len(v) > 1:
                old_setting = v.popleft()
                new_setting = v[0]
                changed_settings[k] = ChangedSetting(
                    k, old_setting, new_setting
                )

        return changed_settings

    # Provide easy-access to well known settings.
    @property
    def header_table_size(self):
        """
        The current value of the :data:`HEADER_TABLE_SIZE
        <h2.settings.SettingCodes.HEADER_TABLE_SIZE>` setting.
        """
        return self[SettingCodes.HEADER_TABLE_SIZE]

    @header_table_size.setter
    def header_table_size(self, value):
        self[SettingCodes.HEADER_TABLE_SIZE] = value

    @property
    def enable_push(self):
        """
        The current value of the :data:`ENABLE_PUSH
        <h2.settings.SettingCodes.ENABLE_PUSH>` setting.
        """
        return self[SettingCodes.ENABLE_PUSH]

    @enable_push.setter
    def enable_push(self, value):
        self[SettingCodes.ENABLE_PUSH] = value

    @property
    def initial_window_size(self):
        """
        The current value of the :data:`INITIAL_WINDOW_SIZE
        <h2.settings.SettingCodes.INITIAL_WINDOW_SIZE>` setting.
        """
        return self[SettingCodes.INITIAL_WINDOW_SIZE]

    @initial_window_size.setter
    def initial_window_size(self, value):
        self[SettingCodes.INITIAL_WINDOW_SIZE] = value

    @property
    def max_frame_size(self):
        """
        The current value of the :data:`MAX_FRAME_SIZE
        <h2.settings.SettingCodes.MAX_FRAME_SIZE>` setting.
        """
        return self[SettingCodes.MAX_FRAME_SIZE]

    @max_frame_size.setter
    def max_frame_size(self, value):
        self[SettingCodes.MAX_FRAME_SIZE] = value

    @property
    def max_concurrent_streams(self):
        """
        The current value of the :data:`MAX_CONCURRENT_STREAMS
        <h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS>` setting.
        """
        # RFC 7540 leaves this unbounded by default; 2**32+1 stands in for
        # "unlimited" when the peer never sent a value.
        return self.get(SettingCodes.MAX_CONCURRENT_STREAMS, 2**32+1)

    @max_concurrent_streams.setter
    def max_concurrent_streams(self, value):
        self[SettingCodes.MAX_CONCURRENT_STREAMS] = value

    @property
    def max_header_list_size(self):
        """
        The current value of the :data:`MAX_HEADER_LIST_SIZE
        <h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE>` setting. If not set,
        returns ``None``, which means unlimited.

        .. versionadded:: 2.5.0
        """
        return self.get(SettingCodes.MAX_HEADER_LIST_SIZE, None)

    @max_header_list_size.setter
    def max_header_list_size(self, value):
        self[SettingCodes.MAX_HEADER_LIST_SIZE] = value

    # Implement the MutableMapping API.
    def __getitem__(self, key):
        val = self._settings[key][0]

        # Things that were created when a setting was received should stay
        # KeyError'd.
        if val is None:
            raise KeyError

        return val

    def __setitem__(self, key, value):
        invalid = _validate_setting(key, value)
        if invalid:
            raise InvalidSettingsValueError(
                "Setting %d has invalid value %d" % (key, value),
                error_code=invalid
            )

        try:
            items = self._settings[key]
        except KeyError:
            # Unknown setting: seed the deque with a None placeholder so
            # __getitem__ keeps raising KeyError until acknowledged.
            items = collections.deque([None])
            self._settings[key] = items

        items.append(value)

    def __delitem__(self, key):
        del self._settings[key]

    def __iter__(self):
        return self._settings.__iter__()

    def __len__(self):
        return len(self._settings)

    def __eq__(self, other):
        if isinstance(other, Settings):
            return self._settings == other._settings
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, Settings):
            return not self == other
        else:
            return NotImplemented
def _validate_setting(setting, value):
    """
    Check that *value* is legal for *setting*.  Returns an error code from
    :class:`ErrorCodes <h2.errors.ErrorCodes>` when invalid, otherwise 0
    (NO_ERROR).
    """
    if setting == SettingCodes.ENABLE_PUSH and value not in (0, 1):
        return ErrorCodes.PROTOCOL_ERROR
    if setting == SettingCodes.INITIAL_WINDOW_SIZE and not (
            0 <= value <= 2147483647):  # 2^31 - 1
        return ErrorCodes.FLOW_CONTROL_ERROR
    if setting == SettingCodes.MAX_FRAME_SIZE and not (
            16384 <= value <= 16777215):  # 2^14 .. 2^24 - 1
        return ErrorCodes.PROTOCOL_ERROR
    if setting == SettingCodes.MAX_HEADER_LIST_SIZE and value < 0:
        return ErrorCodes.PROTOCOL_ERROR
    return 0
| mpl-2.0 |
pandeyadarsh/sympy | sympy/core/tests/test_noncommutative.py | 105 | 4010 | """Tests for noncommutative symbols and expressions."""
from sympy import (
adjoint,
cancel,
collect,
combsimp,
conjugate,
cos,
expand,
factor,
posify,
radsimp,
ratsimp,
rcollect,
sin,
simplify,
symbols,
transpose,
trigsimp,
I,
)
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL
# Non-commutative symbols shared by all tests below; X and Y additionally
# carry hermitian / antihermitian assumptions.
A, B, C = symbols("A B C", commutative=False)
X = symbols("X", commutative=False, hermitian=True)
Y = symbols("Y", commutative=False, antihermitian=True)
def test_adjoint():
    """adjoint reverses noncommutative products and respects (anti)hermiticity."""
    assert adjoint(A).is_commutative is False
    assert adjoint(A*A) == adjoint(A)**2
    assert adjoint(A*B) == adjoint(B)*adjoint(A)
    assert adjoint(A*B**2) == adjoint(B)**2*adjoint(A)
    assert adjoint(A*B - B*A) == adjoint(B)*adjoint(A) - adjoint(A)*adjoint(B)
    assert adjoint(A + I*B) == adjoint(A) - I*adjoint(B)
    assert adjoint(X) == X
    assert adjoint(-I*X) == I*X
    assert adjoint(Y) == -Y
    assert adjoint(-I*Y) == -I*Y
    # adjoint must equal conjugate-transpose in either composition order.
    assert adjoint(X) == conjugate(transpose(X))
    assert adjoint(Y) == conjugate(transpose(Y))
    assert adjoint(X) == transpose(conjugate(X))
    assert adjoint(Y) == transpose(conjugate(Y))
def test_cancel():
    """cancel must not reorder noncommutative factors while cancelling."""
    assert cancel(A*B - B*A) == A*B - B*A
    assert cancel(A*B*(x - 1)) == A*B*(x - 1)
    assert cancel(A*B*(x**2 - 1)/(x + 1)) == A*B*(x - 1)
    assert cancel(A*B*(x**2 - 1)/(x + 1) - B*A*(x - 1)) == A*B*(x - 1) + (1 - x)*B*A
@XFAIL
def test_collect():
    """collect should leave noncommutative commutator-like sums intact (known failure)."""
    assert collect(A*B - B*A, A) == A*B - B*A
    assert collect(A*B - B*A, B) == A*B - B*A
    assert collect(A*B - B*A, x) == A*B - B*A
def test_combsimp():
    """combsimp must not alter a noncommutative commutator."""
    assert combsimp(A*B - B*A) == A*B - B*A
def test_conjugate():
    """conjugate distributes over noncommutative products without reordering."""
    assert conjugate(A).is_commutative is False
    assert (A*A).conjugate() == conjugate(A)**2
    assert (A*B).conjugate() == conjugate(A)*conjugate(B)
    assert (A*B**2).conjugate() == conjugate(A)*conjugate(B)**2
    assert (A*B - B*A).conjugate() == \
        conjugate(A)*conjugate(B) - conjugate(B)*conjugate(A)
    assert (A*B).conjugate() - (B*A).conjugate() == \
        conjugate(A)*conjugate(B) - conjugate(B)*conjugate(A)
    assert (A + I*B).conjugate() == conjugate(A) - I*conjugate(B)
def test_expand():
    """expand preserves factor order in noncommutative products."""
    assert expand((A*B)**2) == A*B*A*B
    assert expand(A*B - B*A) == A*B - B*A
    assert expand((A*B/A)**2) == A*B*B/A
    assert expand(B*A*(A + B)*B) == B*A**2*B + B*A*B**2
    assert expand(B*A*(A + C)*B) == B*A**2*B + B*A*C*B
def test_factor():
    """factor must leave a noncommutative commutator unchanged."""
    assert factor(A*B - B*A) == A*B - B*A
def test_posify():
    """posify keeps noncommutativity and round-trips via its substitution map."""
    assert posify(A)[0].is_commutative is False
    for q in (A*B/A, (A*B/A)**2, (A*B)**2, A*B - B*A):
        p = posify(q)
        assert p[0].subs(p[1]) == q
def test_radsimp():
    """radsimp must leave a noncommutative commutator unchanged."""
    assert radsimp(A*B - B*A) == A*B - B*A
@XFAIL
def test_ratsimp():
    """ratsimp on a noncommutative commutator (known failure)."""
    assert ratsimp(A*B - B*A) == A*B - B*A
@XFAIL
def test_rcollect():
    """rcollect on a noncommutative commutator (known failure)."""
    assert rcollect(A*B - B*A, A) == A*B - B*A
    assert rcollect(A*B - B*A, B) == A*B - B*A
    assert rcollect(A*B - B*A, x) == A*B - B*A
def test_simplify():
    """simplify must leave a noncommutative commutator unchanged."""
    assert simplify(A*B - B*A) == A*B - B*A
def test_subs():
    """subs matches noncommutative sub-products only in the correct order."""
    assert (x*y*A).subs(x*y, z) == A*z
    assert (x*A*B).subs(x*A, C) == C*B
    assert (x*A*x*x).subs(x**2*A, C) == x*C
    assert (x*A*x*B).subs(x**2*A, C) == C*B
    assert (A**2*B**2).subs(A*B**2, C) == A*C
    assert (A*A*A + A*B*A).subs(A*A*A, C) == C + A*B*A
def test_transpose():
    """transpose reverses noncommutative products; X/Y use hermiticity."""
    assert transpose(A).is_commutative is False
    assert transpose(A*A) == transpose(A)**2
    assert transpose(A*B) == transpose(B)*transpose(A)
    assert transpose(A*B**2) == transpose(B)**2*transpose(A)
    assert transpose(A*B - B*A) == \
        transpose(B)*transpose(A) - transpose(A)*transpose(B)
    assert transpose(A + I*B) == transpose(A) + I*transpose(B)
    # For hermitian X: X^T == conjugate(X); for antihermitian Y the sign flips.
    assert transpose(X) == conjugate(X)
    assert transpose(-I*X) == -I*conjugate(X)
    assert transpose(Y) == -conjugate(Y)
    assert transpose(-I*Y) == I*conjugate(Y)
def test_trigsimp():
    """trigsimp applies sin^2 + cos^2 == 1 with a noncommutative coefficient."""
    assert trigsimp(A*sin(x)**2 + A*cos(x)**2) == A
| bsd-3-clause |
Ebag333/Pyfa | eos/db/migrations/upgrade17.py | 1 | 1167 | """
Migration 17
- Moves all fleet boosters to the new schema
"""
import sqlalchemy
def upgrade(saveddata_engine):
    """
    Migrate legacy squad/wing/gang fleet boosters into the new
    ``commandFits`` association table.

    For every fit that was a squad member, the joined query below yields
    that fit's squad, wing, and gang leaders; each non-NULL leader becomes
    an active command-fit row boosting the member.

    :param saveddata_engine: engine handle supplied by the migration
        framework (unused here; the shared session is used instead).
    """
    from eos.db import saveddata_session
    from eos.db.saveddata.fit import commandFits_table

    sql = """
    SELECT sm.memberID as boostedFit, s.leaderID AS squadBoost, w.leaderID AS wingBoost, g.leaderID AS gangBoost
    FROM squadmembers sm
    JOIN squads s ON s.ID = sm.squadID
    JOIN wings w on w.ID = s.wingID
    JOIN gangs g on g.ID = w.gangID
    """
    results = saveddata_session.execute(sql)

    # Loop-invariant: the three booster levels map onto the query's
    # "<level>Boost" result columns.
    booster_levels = ("squad", "wing", "gang")
    for row in results:
        boosted = row["boostedFit"]
        for level in booster_levels:
            value = row["{}Boost".format(level)]
            if value is None:
                continue
            params = {"boosterID": value, "boostedID": boosted, "active": 1}
            try:
                saveddata_session.execute(commandFits_table.insert(), params)
            except Exception:
                # Best-effort insert: duplicates (same booster/boosted pair
                # reachable via several fleet paths) are simply skipped.
                pass
    saveddata_session.commit()
| gpl-3.0 |
NorfairKing/sus-depot | shared/shared/vim/dotvim/bundle/YouCompleteMe/third_party/ycmd/third_party/bottle/test/test_fileupload.py | 41 | 2406 | # -*- coding: utf-8 -*-
''' Tests for the FileUpload wrapper. '''
import unittest
import sys, os.path
import bottle
from bottle import FileUpload, BytesIO
import tempfile
class TestFileUpload(unittest.TestCase):
    """Tests for :class:`bottle.FileUpload`: name attributes, filename
    sanitation, and the ``save`` helper (buffer, file, and directory
    targets, plus the overwrite guard)."""

    def test_name(self):
        self.assertEqual(FileUpload(None, 'abc', None).name, 'abc')

    def test_raw_filename(self):
        # raw_filename keeps the client-supplied value untouched.
        self.assertEqual(FileUpload(None, None, 'x/x').raw_filename, 'x/x')

    def assertFilename(self, bad, good):
        # Helper: the raw filename `bad` must sanitize to `good`.
        fu = FileUpload(None, None, bad)
        self.assertEqual(fu.filename, good)

    def test_filename(self):
        # Whitespace collapses to dashes; path components and dot-runs are
        # stripped; non-ASCII is transliterated; empty results become
        # 'empty'; length is capped.
        self.assertFilename('with space', 'with-space')
        self.assertFilename('with more \t\n\r space', 'with-more-space')
        self.assertFilename('with/path', 'path')
        self.assertFilename('../path', 'path')
        self.assertFilename('..\\path', 'path')
        self.assertFilename('..', 'empty')
        self.assertFilename('.name.', 'name')
        self.assertFilename('.name.cfg', 'name.cfg')
        self.assertFilename(' . na me . ', 'na-me')
        self.assertFilename('path/', 'empty')
        self.assertFilename(bottle.tob('ümläüts$'), 'umlauts')
        self.assertFilename(bottle.touni('ümläüts$'), 'umlauts')
        self.assertFilename('', 'empty')
        self.assertFilename('a'+'b'*1337+'c', 'a'+'b'*254)

    def test_preserve_case_issue_582(self):
        self.assertFilename('UpperCase', 'UpperCase')

    def test_save_buffer(self):
        # Saving into a file-like object copies the upload verbatim.
        fu = FileUpload(open(__file__, 'rb'), 'testfile', __file__)
        buff = BytesIO()
        fu.save(buff)
        buff.seek(0)
        self.assertEqual(fu.file.read(), buff.read())

    def test_save_file(self):
        fu = FileUpload(open(__file__, 'rb'), 'testfile', __file__)
        buff = tempfile.TemporaryFile()
        fu.save(buff)
        buff.seek(0)
        self.assertEqual(fu.file.read(), buff.read())

    def test_save_overwrite_lock(self):
        # Saving over an existing path must refuse with IOError.
        fu = FileUpload(open(__file__, 'rb'), 'testfile', __file__)
        self.assertRaises(IOError, fu.save, __file__)

    def test_save_dir(self):
        # Saving to a directory uses the sanitized filename inside it.
        fu = FileUpload(open(__file__, 'rb'), 'testfile', __file__)
        dirpath = tempfile.mkdtemp()
        filepath = os.path.join(dirpath, fu.filename)
        fu.save(dirpath)
        self.assertEqual(fu.file.read(), open(filepath, 'rb').read())
        os.unlink(filepath)
        os.rmdir(dirpath)
| gpl-2.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexes/test_category.py | 3 | 41440 | # -*- coding: utf-8 -*-
import pytest
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, CategoricalIndex
from .common import Base
from pandas.compat import range, PY3
import numpy as np
from pandas import Categorical, IntervalIndex, compat, notna
from pandas.util.testing import assert_almost_equal
import pandas.core.config as cf
import pandas as pd
if PY3:
    # Python 3 has no builtin ``unicode``; make it a no-op so the
    # py2-oriented test code below keeps working.
    unicode = lambda x: x
class TestCategoricalIndex(Base):
_holder = CategoricalIndex
def setup_method(self, method):
self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
self.setup_indices()
def create_index(self, categories=None, ordered=False):
if categories is None:
categories = list('cab')
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
def test_construction(self):
ci = self.create_index(categories=list('abcd'))
categories = ci.categories
result = Index(ci)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
result = Index(ci.values)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
# empty
result = CategoricalIndex(categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))
assert not result.ordered
# passing categories
result = CategoricalIndex(list('aabbca'), categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
c = pd.Categorical(list('aabbca'))
result = CategoricalIndex(c)
tm.assert_index_equal(result.categories, Index(list('abc')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(c, categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
ci = CategoricalIndex(c, categories=list('abcd'))
result = CategoricalIndex(ci)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(ci, categories=list('ab'))
tm.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, -1, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
tm.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, -1, 0], dtype='int8'))
assert result.ordered
# turn me to an Index
result = Index(np.array(ci))
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
def test_construction_with_dtype(self):
# specify dtype
ci = self.create_index(categories=list('abc'))
result = Index(np.array(ci), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
result = Index(np.array(ci).tolist(), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
# these are generally only equal when the categories are reordered
ci = self.create_index()
result = Index(
np.array(ci), dtype='category').reorder_categories(ci.categories)
tm.assert_index_equal(result, ci, exact=True)
# make sure indexes are handled
expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],
ordered=True)
idx = Index(range(3))
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
def test_create_categorical(self):
# https://github.com/pandas-dev/pandas/pull/17513
# The public CI constructor doesn't hit this code path with
# instances of CategoricalIndex, but we still want to test the code
ci = CategoricalIndex(['a', 'b', 'c'])
# First ci is self, second ci is data.
result = CategoricalIndex._create_categorical(ci, ci)
expected = Categorical(['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
def test_disallow_set_ops(self):
# GH 10039
# set ops (+/-) raise TypeError
idx = pd.Index(pd.Categorical(['a', 'b']))
pytest.raises(TypeError, lambda: idx - idx)
pytest.raises(TypeError, lambda: idx + idx)
pytest.raises(TypeError, lambda: idx - ['a', 'b'])
pytest.raises(TypeError, lambda: idx + ['a', 'b'])
pytest.raises(TypeError, lambda: ['a', 'b'] - idx)
pytest.raises(TypeError, lambda: ['a', 'b'] + idx)
def test_method_delegation(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.set_categories(list('cab'))
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.rename_categories(list('efg'))
tm.assert_index_equal(result, CategoricalIndex(
list('ffggef'), categories=list('efg')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.add_categories(['d'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabd')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.remove_categories(['c'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabb') + [np.nan] + ['a'], categories=list('ab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_unordered()
tm.assert_index_equal(result, ci)
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_ordered()
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabdef'), ordered=True))
# invalid
pytest.raises(ValueError, lambda: ci.set_categories(
list('cab'), inplace=True))
def test_contains(self):
ci = self.create_index(categories=list('cabdef'))
assert 'a' in ci
assert 'z' not in ci
assert 'e' not in ci
assert np.nan not in ci
# assert codes NOT in index
assert 0 not in ci
assert 1 not in ci
ci = CategoricalIndex(
list('aabbca') + [np.nan], categories=list('cabdef'))
assert np.nan in ci
def test_min_max(self):
ci = self.create_index(ordered=False)
pytest.raises(TypeError, lambda: ci.min())
pytest.raises(TypeError, lambda: ci.max())
ci = self.create_index(ordered=True)
assert ci.min() == 'c'
assert ci.max() == 'b'
def test_map(self):
    """map() re-maps the categories (not the codes), preserving the
    ordered flag and the index name; a non-injective mapper falls back
    to a plain Index."""
    ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),
                             ordered=True)
    result = ci.map(lambda x: x.lower())
    exp = pd.CategoricalIndex(list('ababc'), categories=list('cba'),
                              ordered=True)
    tm.assert_index_equal(result, exp)

    ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
                             ordered=False, name='XXX')
    result = ci.map(lambda x: x.lower())
    exp = pd.CategoricalIndex(list('ababc'), categories=list('bac'),
                              ordered=False, name='XXX')
    tm.assert_index_equal(result, exp)

    # GH 12766: Return an index not an array
    tm.assert_index_equal(ci.map(lambda x: 1),
                          Index(np.array([1] * 5, dtype=np.int64),
                                name='XXX'))

    # change categories dtype
    ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
                             ordered=False)

    def f(x):
        # str -> int mapper; category order must be carried over
        return {'A': 10, 'B': 20, 'C': 30}.get(x)

    result = ci.map(f)
    exp = pd.CategoricalIndex([10, 20, 10, 20, 30],
                              categories=[20, 10, 30],
                              ordered=False)
    tm.assert_index_equal(result, exp)
def test_where(self):
    """where() with an all-True mask is the identity; masked-out slots
    become NaN while the categories are preserved."""
    idx = self.create_index()
    tm.assert_index_equal(idx.where(notna(idx)), idx)

    masked = pd.CategoricalIndex([np.nan, np.nan] + idx[2:].tolist(),
                                 categories=idx.categories)
    tm.assert_index_equal(idx.where(notna(masked)), masked)
def test_where_array_like(self):
    """The where() condition may be any list-like container, with the
    same result for each container type."""
    idx = self.create_index()
    mask = [False] + [True] * (len(idx) - 1)
    expected = pd.CategoricalIndex([np.nan] + idx[1:].tolist(),
                                   categories=idx.categories)
    for wrap in (list, tuple, np.array, pd.Series):
        tm.assert_index_equal(idx.where(wrap(mask)), expected)
def test_append(self):
    """append() keeps the categorical dtype when categories agree,
    raises TypeError on mismatched/reordered categories, and coerces to
    object when the base index is not categorical."""
    ci = self.create_index()
    categories = ci.categories

    # append cats with the same categories
    result = ci[:3].append(ci[3:])
    tm.assert_index_equal(result, ci, exact=True)

    foos = [ci[:1], ci[1:3], ci[3:]]
    result = foos[0].append(foos[1:])
    tm.assert_index_equal(result, ci, exact=True)

    # empty
    result = ci.append([])
    tm.assert_index_equal(result, ci, exact=True)

    # appending with different categories or reordered is not ok
    pytest.raises(
        TypeError,
        lambda: ci.append(ci.values.set_categories(list('abcd'))))
    pytest.raises(
        TypeError,
        lambda: ci.append(ci.values.reorder_categories(list('abc'))))

    # with objects: plain values that are all valid categories are fine
    result = ci.append(Index(['c', 'a']))
    expected = CategoricalIndex(list('aabbcaca'), categories=categories)
    tm.assert_index_equal(result, expected, exact=True)

    # invalid objects: 'd' is not a category
    pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))

    # GH14298 - if base object is not categorical -> coerce to object
    result = Index(['c', 'a']).append(ci)
    expected = Index(list('caaabbca'))
    tm.assert_index_equal(result, expected, exact=True)
def test_insert(self):
    """insert() follows Python list positional semantics and only
    accepts values that are already categories."""
    ci = self.create_index()
    categories = ci.categories

    # test 0th element
    result = ci.insert(0, 'a')
    expected = CategoricalIndex(list('aaabbca'), categories=categories)
    tm.assert_index_equal(result, expected, exact=True)

    # test Nth element that follows Python list behavior
    # (-1 inserts *before* the last element)
    result = ci.insert(-1, 'a')
    expected = CategoricalIndex(list('aabbcaa'), categories=categories)
    tm.assert_index_equal(result, expected, exact=True)

    # test empty
    result = CategoricalIndex(categories=categories).insert(0, 'a')
    expected = CategoricalIndex(['a'], categories=categories)
    tm.assert_index_equal(result, expected, exact=True)

    # invalid: 'd' is not one of the categories
    pytest.raises(TypeError, lambda: ci.insert(0, 'd'))
def test_delete(self):
    """delete() supports positive and negative positions; out-of-bounds
    positions raise."""
    idx = self.create_index()
    cats = idx.categories

    tm.assert_index_equal(
        idx.delete(0),
        CategoricalIndex(list('abbca'), categories=cats), exact=True)
    tm.assert_index_equal(
        idx.delete(-1),
        CategoricalIndex(list('aabbc'), categories=cats), exact=True)

    with pytest.raises((IndexError, ValueError)):
        # Either depending on NumPy version
        idx.delete(10)
def test_astype(self):
    """astype('category') is a no-op; astype(object) yields an equal but
    plain Index; a categorical of intervals round-trips through
    astype('interval')."""
    ci = self.create_index()
    result = ci.astype('category')
    tm.assert_index_equal(result, ci, exact=True)

    result = ci.astype(object)
    tm.assert_index_equal(result, Index(np.array(ci)))

    # this IS equal, but not the same class
    assert result.equals(ci)
    assert isinstance(result, Index)
    assert not isinstance(result, CategoricalIndex)

    # interval
    ii = IntervalIndex.from_arrays(left=[-0.001, 2.0],
                                   right=[2, 4],
                                   closed='right')

    # code -1 marks a missing value in Categorical.from_codes
    ci = CategoricalIndex(Categorical.from_codes(
        [0, 1, -1], categories=ii, ordered=True))

    result = ci.astype('interval')
    expected = ii.take([0, 1, -1])
    tm.assert_index_equal(result, expected)

    result = IntervalIndex.from_intervals(result.values)
    tm.assert_index_equal(result, expected)
def test_reindex_base(self):
    """get_indexer against itself is the identity permutation; an
    unknown fill method is rejected."""
    # Determined by cat ordering.
    idx = CategoricalIndex(list("cab"), categories=list("cab"))
    identity = np.arange(len(idx), dtype=np.intp)
    tm.assert_numpy_array_equal(identity, idx.get_indexer(idx))

    with tm.assert_raises_regex(ValueError, "Invalid fill method"):
        idx.get_indexer(idx, method="invalid")
def test_reindexing(self):
    """get_indexer on a categorical must agree with the object-dtype
    non-unique indexer, including when the finder duplicates members."""
    # fixed seed so the random finders are reproducible
    np.random.seed(123456789)

    ci = self.create_index()
    oidx = Index(np.array(ci))

    for n in [1, 2, 5, len(ci)]:
        finder = oidx[np.random.randint(0, len(ci), size=n)]
        expected = oidx.get_indexer_non_unique(finder)[0]
        actual = ci.get_indexer(finder)
        tm.assert_numpy_array_equal(expected, actual)

    # see gh-17323
    #
    # Even when indexer is equal to the
    # members in the index, we should
    # respect duplicates instead of taking
    # the fast-track path.
    for finder in [list("aabbca"), list("aababca")]:
        expected = oidx.get_indexer_non_unique(finder)[0]
        actual = ci.get_indexer(finder)
        tm.assert_numpy_array_equal(expected, actual)
def test_reindex_dtype(self):
    """The reindex() result dtype depends on the target: a plain list
    yields an object Index, while a Categorical target keeps (and
    narrows) the categorical dtype.  Duplicate 'a' expands the result."""
    c = CategoricalIndex(['a', 'b', 'c', 'a'])
    res, indexer = c.reindex(['a', 'c'])
    tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)
    tm.assert_numpy_array_equal(indexer,
                                np.array([0, 3, 2], dtype=np.intp))

    c = CategoricalIndex(['a', 'b', 'c', 'a'])
    res, indexer = c.reindex(Categorical(['a', 'c']))
    # target categories replace the original ones
    exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
    tm.assert_index_equal(res, exp, exact=True)
    tm.assert_numpy_array_equal(indexer,
                                np.array([0, 3, 2], dtype=np.intp))

    c = CategoricalIndex(['a', 'b', 'c', 'a'],
                         categories=['a', 'b', 'c', 'd'])
    res, indexer = c.reindex(['a', 'c'])
    exp = Index(['a', 'a', 'c'], dtype='object')
    tm.assert_index_equal(res, exp, exact=True)
    tm.assert_numpy_array_equal(indexer,
                                np.array([0, 3, 2], dtype=np.intp))

    c = CategoricalIndex(['a', 'b', 'c', 'a'],
                         categories=['a', 'b', 'c', 'd'])
    res, indexer = c.reindex(Categorical(['a', 'c']))
    exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
    tm.assert_index_equal(res, exp, exact=True)
    tm.assert_numpy_array_equal(indexer,
                                np.array([0, 3, 2], dtype=np.intp))
def test_reindex_empty_index(self):
    """Reindexing an empty CategoricalIndex yields a plain Index of the
    targets and an all-missing indexer (GH 16770)."""
    empty = CategoricalIndex([])
    res, indexer = empty.reindex(['a', 'b'])

    tm.assert_index_equal(res, Index(['a', 'b']), exact=True)
    tm.assert_numpy_array_equal(indexer,
                                np.array([-1, -1], dtype=np.intp))
def test_is_monotonic(self):
    """Monotonicity is judged by *category* order once explicit
    categories are given, not by the raw values' natural order."""
    c = CategoricalIndex([1, 2, 3])
    assert c.is_monotonic_increasing
    assert not c.is_monotonic_decreasing

    c = CategoricalIndex([1, 2, 3], ordered=True)
    assert c.is_monotonic_increasing
    assert not c.is_monotonic_decreasing

    # reversed categories: the same values are now decreasing
    c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
    assert not c.is_monotonic_increasing
    assert c.is_monotonic_decreasing

    c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])
    assert not c.is_monotonic_increasing
    assert not c.is_monotonic_decreasing

    c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)
    assert not c.is_monotonic_increasing
    assert c.is_monotonic_decreasing

    # non lexsorted categories
    categories = [9, 0, 1, 2, 3]

    # [9, 0] is increasing in category order even though 9 > 0
    c = CategoricalIndex([9, 0], categories=categories)
    assert c.is_monotonic_increasing
    assert not c.is_monotonic_decreasing

    c = CategoricalIndex([0, 1], categories=categories)
    assert c.is_monotonic_increasing
    assert not c.is_monotonic_decreasing
def test_duplicates(self):
    """Duplicate detection and removal keep the index name."""
    idx = CategoricalIndex([0, 0, 0], name='foo')
    assert not idx.is_unique
    assert idx.has_duplicates

    deduped = CategoricalIndex([0], name='foo')
    tm.assert_index_equal(idx.drop_duplicates(), deduped)
    tm.assert_index_equal(idx.unique(), deduped)
def test_get_indexer(self):
    """get_indexer accepts categorical, list and Index targets (the
    duplicated 'a' in idx1 expands to two positions, hence 4 expected
    entries for 3 targets); ffill/bfill/nearest are unsupported."""
    idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))
    idx2 = CategoricalIndex(list('abf'))

    # BUG FIX: the original computed idx1.get_indexer(idx2) inside the
    # loop, ignoring the loop variable, so the list and Index variants
    # were never actually exercised.  All three variants hold the same
    # values, so the expected result is identical for each.
    for indexer in [idx2, list('abf'), Index(list('abf'))]:
        r1 = idx1.get_indexer(indexer)
        assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))

    pytest.raises(NotImplementedError,
                  lambda: idx2.get_indexer(idx1, method='pad'))
    pytest.raises(NotImplementedError,
                  lambda: idx2.get_indexer(idx1, method='backfill'))
    pytest.raises(NotImplementedError,
                  lambda: idx2.get_indexer(idx1, method='nearest'))
def test_get_loc(self):
    """get_loc mirrors plain-Index behavior: scalar position for a
    unique label, boolean mask for scattered duplicates, slice for
    contiguous duplicates, KeyError for a missing label."""
    # GH 12531
    cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))
    idx1 = Index(list('abcde'))
    assert cidx1.get_loc('a') == idx1.get_loc('a')
    assert cidx1.get_loc('e') == idx1.get_loc('e')

    for i in [cidx1, idx1]:
        with pytest.raises(KeyError):
            i.get_loc('NOT-EXIST')

    # non-unique
    cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))
    idx2 = Index(list('aacded'))

    # results in bool array
    res = cidx2.get_loc('d')
    tm.assert_numpy_array_equal(res, idx2.get_loc('d'))
    tm.assert_numpy_array_equal(res, np.array([False, False, False,
                                               True, False, True]))

    # unique element results in scalar
    res = cidx2.get_loc('e')
    assert res == idx2.get_loc('e')
    assert res == 4

    for i in [cidx2, idx2]:
        with pytest.raises(KeyError):
            i.get_loc('NOT-EXIST')

    # non-unique, slicable
    cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))
    idx3 = Index(list('aabbb'))

    # results in slice
    res = cidx3.get_loc('a')
    assert res == idx3.get_loc('a')
    assert res == slice(0, 2, None)

    res = cidx3.get_loc('b')
    assert res == idx3.get_loc('b')
    assert res == slice(2, 5, None)

    for i in [cidx3, idx3]:
        with pytest.raises(KeyError):
            # 'c' is a valid category but never occurs in the data
            i.get_loc('c')
def test_repr_roundtrip(self):
    """repr() of a short index must eval() back to an equal index; a
    long index only needs to be printable (its repr is truncated)."""
    ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
    str(ci)
    tm.assert_index_equal(eval(repr(ci)), ci, exact=True)

    # formatting: unicode path differs between Python 2 and 3
    if PY3:
        str(ci)
    else:
        compat.text_type(ci)

    # long format
    # this is not reprable
    ci = CategoricalIndex(np.random.randint(0, 5, size=100))
    if PY3:
        str(ci)
    else:
        compat.text_type(ci)
def test_isin(self):
    """isin() matches on values (NaN included); a categorical argument
    with different categories is coerced to an ndarray first."""
    ci = CategoricalIndex(
        list('aabca') + [np.nan], categories=['c', 'a', 'b'])
    tm.assert_numpy_array_equal(
        ci.isin(['c']),
        np.array([False, False, False, True, False, False]))
    tm.assert_numpy_array_equal(
        ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))
    # including NaN in the targets matches the trailing NaN too
    tm.assert_numpy_array_equal(
        ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))

    # mismatched categorical -> coerced to ndarray so doesn't matter
    result = ci.isin(ci.set_categories(list('abcdefghi')))
    expected = np.array([True] * 6)
    tm.assert_numpy_array_equal(result, expected)

    # set_categories drops a/b/c codes to NaN, so only NaN matches
    result = ci.isin(ci.set_categories(list('defghi')))
    expected = np.array([False] * 5 + [True])
    tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
    """identical() is stricter than equals(): the category metadata
    must match as well as the values."""
    left = CategoricalIndex(['a', 'b'], categories=['a', 'b'],
                            ordered=True)
    right = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
                             ordered=True)

    assert left.identical(left)
    assert left.identical(left.copy())
    # same values, different categories -> not identical
    assert not left.identical(right)
def test_ensure_copied_data(self):
    # gh-12309: Check the "copy" argument of each
    # Index.__new__ is honored.
    #
    # Must be tested separately from other indexes because
    # self.value is not an ndarray.
    # _base resolves an ndarray to the object that owns its memory
    _base = lambda ar: ar if ar.base is None else ar.base

    for index in self.indices.values():
        result = CategoricalIndex(index.values, copy=True)
        tm.assert_index_equal(index, result)
        # copy=True must allocate fresh storage ...
        assert _base(index.values) is not _base(result.values)

        # ... while copy=False shares the original buffer
        result = CategoricalIndex(index.values, copy=False)
        assert _base(index.values) is _base(result.values)
def test_equals_categorical(self):
    """equals() compares values only, while the comparison operators
    additionally enforce matching length, ordered flag, and categories
    (raising for incompatible categoricals)."""
    ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
    ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
                           ordered=True)

    assert ci1.equals(ci1)
    assert not ci1.equals(ci2)
    # equals() ignores dtype: an object round-trip still compares equal
    assert ci1.equals(ci1.astype(object))
    assert ci1.astype(object).equals(ci1)

    assert (ci1 == ci1).all()
    assert not (ci1 != ci1).all()
    assert not (ci1 > ci1).all()
    assert not (ci1 < ci1).all()
    assert (ci1 <= ci1).all()
    assert (ci1 >= ci1).all()

    assert not (ci1 == 1).all()
    assert (ci1 == Index(['a', 'b'])).all()
    assert (ci1 == ci1.values).all()

    # invalid comparisons
    with tm.assert_raises_regex(ValueError, "Lengths must match"):
        ci1 == Index(['a', 'b', 'c'])
    pytest.raises(TypeError, lambda: ci1 == ci2)
    pytest.raises(
        TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
    pytest.raises(
        TypeError,
        lambda: ci1 == Categorical(ci1.values, categories=list('abc')))

    # tests
    # make sure that we are testing for category inclusion properly
    ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
    assert not ci.equals(list('aabca'))
    # Same categories, but different order
    # Unordered
    assert ci.equals(CategoricalIndex(list('aabca')))
    # Ordered
    assert not ci.equals(CategoricalIndex(list('aabca'), ordered=True))
    assert ci.equals(ci.copy())

    # trailing NaN breaks equality against NaN-free counterparts
    ci = CategoricalIndex(list('aabca') + [np.nan],
                          categories=['c', 'a', 'b'])
    assert not ci.equals(list('aabca'))
    assert not ci.equals(CategoricalIndex(list('aabca')))
    assert ci.equals(ci.copy())

    ci = CategoricalIndex(list('aabca') + [np.nan],
                          categories=['c', 'a', 'b'])
    assert not ci.equals(list('aabca') + [np.nan])
    assert ci.equals(CategoricalIndex(list('aabca') + [np.nan]))
    assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan],
                                          ordered=True))
    assert ci.equals(ci.copy())
def test_string_categorical_index_repr(self):
    """Pin the exact repr/unicode output for ASCII and East-Asian-width
    string categoricals: short, wrapped, truncated, and with many
    categories, on both Python 2 and 3.

    NOTE(review): the expected multi-line literals appear to have lost
    their leading alignment whitespace in this copy of the file --
    confirm against upstream before relying on them.
    """
    # short
    idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
    if PY3:
        expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""  # noqa
        assert unicode(idx) == expected

    # multiple lines
    idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)
    if PY3:
        expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""  # noqa
        assert unicode(idx) == expected

    # truncated
    idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)
    if PY3:
        expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)"""  # noqa
        assert unicode(idx) == expected

    # larger categories
    idx = pd.CategoricalIndex(list('abcdefghijklmmo'))
    if PY3:
        expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
u'k', u'l', u'm', u'm', u'o'],
categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')"""  # noqa
        assert unicode(idx) == expected

    # short
    idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
    if PY3:
        expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
        assert unicode(idx) == expected

    # multiple lines
    idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
    if PY3:
        expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
        assert unicode(idx) == expected

    # truncated
    idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
    if PY3:
        expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
        assert unicode(idx) == expected

    # larger categories
    idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
    if PY3:
        expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""  # noqa
        assert repr(idx) == expected
    else:
        expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""  # noqa
        assert unicode(idx) == expected

    # Enable Unicode option -----------------------------------------
    with cf.option_context('display.unicode.east_asian_width', True):

        # short
        idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected

        # multiple lines
        idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected

        # truncated
        idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
            assert unicode(idx) == expected

        # larger categories
        idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
def test_fillna_categorical(self):
    """fillna only accepts a value that is already a category
    (GH 11343)."""
    idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')

    # filling with an existing category works
    tm.assert_index_equal(
        idx.fillna(1.0),
        CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x'))

    # anything else is rejected
    with tm.assert_raises_regex(ValueError,
                                'fill value must be in categories'):
        idx.fillna(2.0)
def test_take_fill_value(self):
    """take() semantics for -1: positional wrap-around by default,
    NaN-fill when fill_value is given with allow_fill=True, and
    indices below -1 are rejected in fill mode (GH 12631)."""
    # numeric category
    idx = pd.CategoricalIndex([1, 2, 3], name='xxx')
    result = idx.take(np.array([1, 0, -1]))
    expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
    tm.assert_index_equal(result, expected)
    tm.assert_categorical_equal(result.values, expected.values)

    # fill_value: -1 becomes NaN but categories are preserved
    result = idx.take(np.array([1, 0, -1]), fill_value=True)
    expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],
                                   name='xxx')
    tm.assert_index_equal(result, expected)
    tm.assert_categorical_equal(result.values, expected.values)

    # allow_fill=False: fill_value is ignored, -1 is positional again
    result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                      fill_value=True)
    expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
    tm.assert_index_equal(result, expected)
    tm.assert_categorical_equal(result.values, expected.values)

    # object category
    idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),
                              ordered=True, name='xxx')
    result = idx.take(np.array([1, 0, -1]))
    expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
                                   ordered=True, name='xxx')
    tm.assert_index_equal(result, expected)
    tm.assert_categorical_equal(result.values, expected.values)

    # fill_value
    result = idx.take(np.array([1, 0, -1]), fill_value=True)
    expected = pd.CategoricalIndex(['B', 'C', np.nan],
                                   categories=list('ABC'), ordered=True,
                                   name='xxx')
    tm.assert_index_equal(result, expected)
    tm.assert_categorical_equal(result.values, expected.values)

    # allow_fill=False
    result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                      fill_value=True)
    expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
                                   ordered=True, name='xxx')
    tm.assert_index_equal(result, expected)
    tm.assert_categorical_equal(result.values, expected.values)

    msg = ('When allow_fill=True and fill_value is not None, '
           'all indices must be >= -1')
    with tm.assert_raises_regex(ValueError, msg):
        idx.take(np.array([1, 0, -2]), fill_value=True)
    with tm.assert_raises_regex(ValueError, msg):
        idx.take(np.array([1, 0, -5]), fill_value=True)

    # out-of-bounds positions always raise IndexError
    with pytest.raises(IndexError):
        idx.take(np.array([1, -5]))
def test_take_fill_value_datetime(self):
    """Same take()/fill_value contract for a datetime-valued
    categorical: the filled slot becomes NaT while the original
    datetime categories are preserved."""
    # datetime category
    idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                           name='xxx')
    idx = pd.CategoricalIndex(idx)
    result = idx.take(np.array([1, 0, -1]))
    expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
                                name='xxx')
    expected = pd.CategoricalIndex(expected)
    tm.assert_index_equal(result, expected)

    # fill_value: NaT fills the -1 slot, categories stay complete
    result = idx.take(np.array([1, 0, -1]), fill_value=True)
    expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
                                name='xxx')
    exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
    expected = pd.CategoricalIndex(expected, categories=exp_cats)
    tm.assert_index_equal(result, expected)

    # allow_fill=False: -1 is positional
    result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                      fill_value=True)
    expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
                                name='xxx')
    expected = pd.CategoricalIndex(expected)
    tm.assert_index_equal(result, expected)

    msg = ('When allow_fill=True and fill_value is not None, '
           'all indices must be >= -1')
    with tm.assert_raises_regex(ValueError, msg):
        idx.take(np.array([1, 0, -2]), fill_value=True)
    with tm.assert_raises_regex(ValueError, msg):
        idx.take(np.array([1, 0, -5]), fill_value=True)

    with pytest.raises(IndexError):
        idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
    """take() rejects unknown kwargs and the numpy-only 'out'/'mode'
    parameters."""
    idx = pd.CategoricalIndex([1, 2, 3], name='foo')
    positions = [1, 0, -1]

    tm.assert_raises_regex(
        TypeError, r"take\(\) got an unexpected keyword argument 'foo'",
        idx.take, positions, foo=2)
    tm.assert_raises_regex(
        ValueError, "the 'out' parameter is not supported",
        idx.take, positions, out=positions)
    tm.assert_raises_regex(
        ValueError, "the 'mode' parameter is not supported",
        idx.take, positions, mode='clip')
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svc
author: "Brian Coca (@bcoca)"
version_added: "1.9"
short_description: Manage daemontools services.
description:
- Controls daemontools services on remote hosts using the svc utility.
options:
name:
required: true
description:
- Name of the service to manage.
state:
required: false
choices: [ started, stopped, restarted, reloaded, once, killed ]
description:
- C(Started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
C(reloaded) will send a sigusr1 (svc -1).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
downed:
required: false
choices: [ "yes", "no" ]
default: no
description:
- Should a 'down' file exist or not, if it exists it disables auto startup.
defaults to no. Downed does not imply stopped.
enabled:
required: false
choices: [ "yes", "no" ]
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
Make note that a service can be enabled and downed (no auto restart).
service_dir:
required: false
default: /service
description:
- directory svscan watches for services
service_src:
required: false
description:
- directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start svc dnscache, if not running
- svc:
name: dnscache
state: started
# Example action to stop svc dnscache, if running
- svc:
name: dnscache
state: stopped
# Example action to kill svc dnscache, in all cases
- svc:
name: dnscache
state: killed
# Example action to restart svc dnscache, in all cases
- svc:
name: dnscache
state: restarted
# Example action to reload svc dnscache, in all cases
- svc:
name: dnscache
state: reloaded
# Example using alt svc directory location
- svc:
name: dnscache
state: reloaded
service_dir: /var/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc

    Wraps the ``svc`` (control) and ``svstat`` (status) utilities for a
    single service directory.
    """

    # def __new__(cls, *args, **kwargs):
    #     return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # extra locations (besides PATH) where svc/svstat commonly live
        self.extra_paths = ['/command', '/usr/local/bin']
        # attribute names exposed by report()
        self.report_vars = ['state', 'enabled', 'downed', 'svc_full',
                            'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.downed = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)

        # svc_full is the (sym)link svscan watches; src_full is the real
        # service definition directory
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # lexists so a dangling symlink still counts as "enabled"
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.downed = os.path.lexists('%s/down' % self.svc_full)
            self.get_status()
        else:
            self.downed = os.path.lexists('%s/down' % self.src_full)
            self.state = 'stopped'

    def enable(self):
        """Link the service source into the scanned directory."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Unlink the service and bring it (and its logger) down."""
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
        # -dx: bring the service down and tell supervise to exit
        self.execute_command([self.svc_cmd, '-dx', self.src_full])

        src_log = '%s/log' % self.src_full
        if os.path.exists(src_log):
            self.execute_command([self.svc_cmd, '-dx', src_log])

    def get_status(self):
        """Parse ``svstat`` output into state/pid/duration attributes."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])

        if err is not None and err:
            self.full_state = self.state = err
        else:
            self.full_state = out

            # BUG FIX: regex patterns are now raw strings -- '\(' and
            # '\d' in plain strings are invalid escape sequences
            # (DeprecationWarning, an error in future Pythons)
            m = re.search(r'\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)

            m = re.search(r'(\d+) seconds', out)
            if m:
                self.duration = m.group(1)

            # build 'started'/'stopped' (or 'starting'/'stopping' when a
            # transition is wanted) from the svstat keywords
            if re.search(' up ', out):
                self.state = 'start'
            elif re.search(' down ', out):
                self.state = 'stopp'
            else:
                self.state = 'unknown'
                return

            if re.search(' want ', out):
                self.state += 'ing'
            else:
                self.state += 'ed'

    def start(self):
        return self.execute_command([self.svc_cmd, '-u', self.svc_full])

    def stopp(self):
        # kept for the state[:-2] dispatch in main(): 'stopped' -> 'stopp'
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, '-d', self.svc_full])

    def once(self):
        return self.execute_command([self.svc_cmd, '-o', self.svc_full])

    def reload(self):
        # -1 sends SIGUSR1
        return self.execute_command([self.svc_cmd, '-1', self.svc_full])

    def restart(self):
        # -t sends SIGTERM and restarts
        return self.execute_command([self.svc_cmd, '-t', self.svc_full])

    def kill(self):
        # -k sends SIGKILL
        return self.execute_command([self.svc_cmd, '-k', self.svc_full])

    def execute_command(self, cmd):
        """Run *cmd*, failing the module on execution errors; returns
        (rc, stdout, stderr)."""
        try:
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the reportable attributes."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
# ===========================================
# Main control flow
def main():
    """Module entry point: reconcile the requested enabled/state/downed
    settings with the actual daemontools service state and report
    whether anything changed."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=['started', 'stopped', 'restarted',
                                'killed', 'reloaded', 'once']),
            enabled=dict(required=False, type='bool'),
            downed=dict(required=False, type='bool'),
            dist=dict(required=False, default='daemontools'),
            service_dir=dict(required=False, default='/service'),
            service_src=dict(required=False, default='/etc/service'),
        ),
        supports_check_mode=True,
    )

    # force a stable C locale so svstat output parsing is predictable
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C',
                                             LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']
    downed = module.params['downed']

    svc = Svc(module)
    changed = False
    orig_state = svc.report()

    if enabled is not None and enabled != svc.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    svc.enable()
                else:
                    svc.disable()
            except (OSError, IOError) as e:
                # BUG FIX: message read "Could change service link"
                module.fail_json(
                    msg="Could not change service link: %s" % to_native(e))

    if state is not None and state != svc.state:
        changed = True
        if not module.check_mode:
            # Map the requested state onto an Svc action method.
            # Stripping the participle suffix turns 'started' -> 'start',
            # 'stopped' -> 'stopp', etc., but would have turned 'once'
            # into the nonexistent 'on' (AttributeError) -- BUG FIX:
            # handle 'once' explicitly.
            action = 'once' if state == 'once' else state[:-2]
            getattr(svc, action)()

    if downed is not None and downed != svc.downed:
        changed = True
        if not module.check_mode:
            d_file = "%s/down" % svc.svc_full
            try:
                if downed:
                    # touch the down file to disable auto-start
                    open(d_file, "a").close()
                else:
                    os.unlink(d_file)
            except (OSError, IOError) as e:
                # BUG FIX: message read "Could change downed file"
                module.fail_json(
                    msg="Could not change downed file: %s " % (to_native(e)))

    module.exit_json(changed=changed, svc=svc.report())


if __name__ == '__main__':
    main()
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *
TErrorMessage = [
"",
"$0",
"$0",
"$0",
"$0",
"$0",
"$0",
"$0",
"$0",
"Parquet files should not be split into multiple hdfs-blocks. file=$0",
"Column metadata states there are $0 values, but read $1 values from column $2. file=$3",
"(unused)",
"ParquetScanner: reached EOF while deserializing data page header. file=$0",
"Metadata states that in group $0($1) there are $2 rows, but $3 rows were read.",
"(unused)",
"File '$0' column '$1' does not have the decimal precision set.",
"File '$0' column '$1' has a precision that does not match the table metadata precision. File metadata precision: $2, table metadata precision: $3.",
"File '$0' column '$1' does not have converted type set to DECIMAL",
"File '$0' column '$1' contains decimal data but the table metadata has type $2",
"Problem parsing file $0 at $1$2",
"Decompressor: block size is too big. Data is likely corrupt. Size: $0",
"Decompressor: invalid compressed length. Data is likely corrupt.",
"Snappy: GetUncompressedLength failed",
"SnappyBlock: RawUncompress failed",
"Snappy: Decompressed size is not correct.",
"Unknown disk id. This will negatively affect performance. Check your hdfs settings to enable block location metadata.",
"Reserved resource size ($0) is larger than query mem limit ($1), and will be restricted to $1. Configure the reservation size by setting RM_INITIAL_MEM.",
"Cannot perform join at hash join node with id $0. The input data was partitioned the maximum number of $1 times. This could mean there is significant skew in the data or the memory limit is set too low.",
"Cannot perform aggregation at hash aggregation node with id $0. The input data was partitioned the maximum number of $1 times. This could mean there is significant skew in the data or the memory limit is set too low.",
"Builtin '$0' with symbol '$1' does not exist. Verify that all your impalads are the same version.",
"RPC Error: $0",
"RPC timed out",
"Failed to verify function $0 from LLVM module $1, see log for more details.",
"File $0 corrupt. RLE level data bytes = $1",
"Column '$0' has conflicting Avro decimal types. Table schema $1: $2, file schema $1: $3",
"Column '$0' has conflicting Avro decimal types. Declared $1: $2, $1 in table's Avro schema: $3",
"Unresolvable types for column '$0': table type: $1, file type: $2",
"Unresolvable types for column '$0': declared column type: $1, table's Avro schema type: $2",
"Field $0 is missing from file and default values of type $1 are not yet supported.",
"Inconsistent table metadata. Mismatch between column definition and Avro schema: cannot read field $0 because there are only $1 fields.",
"Field $0 is missing from file and does not have a default value.",
"Field $0 is nullable in the file schema but not the table schema.",
"Inconsistent table metadata. Field $0 is not a record in the Avro schema.",
"Could not read definition level, even though metadata states there are $0 values remaining in data page. file=$1",
"Mismatched number of values in column index $0 ($1 vs. $2). file=$3",
"Failed to decode dictionary-encoded value. file=$0",
"SSL private-key password command ('$0') failed with error: $1",
"The SSL certificate path is blank",
"The SSL private key path is blank",
"The SSL certificate file does not exist at path $0",
"The SSL private key file does not exist at path $0",
"SSL socket creation failed: $0",
"Memory allocation of $0 bytes failed",
"Could not read repetition level, even though metadata states there are $0 values remaining in data page. file=$1",
"File '$0' has an incompatible Parquet schema for column '$1'. Column type: $2, Parquet schema:\n$3",
"Failed to allocate buffer for collection '$0'.",
"Temporary device for directory $0 is blacklisted from a previous error and cannot be used.",
"Temporary file $0 is blacklisted from a previous error and cannot be expanded.",
"RPC client failed to connect: $0",
"Metadata for file '$0' appears stale. Try running \"refresh $1\" to reload the file metadata.",
"File '$0' has an invalid version number: $1\nThis could be due to stale metadata. Try running \"refresh $2\".",
"Tried to read $0 bytes but could only read $1 bytes. This may indicate data file corruption. (file $2, byte offset: $3)",
"Invalid read of $0 bytes. This may indicate data file corruption. (file $1, byte offset: $2)",
"File '$0' has an invalid version header: $1\nMake sure the file is an Avro data file.",
"Enabling server-to-server SSL connections in conjunction with Kerberos authentication is not supported at the same time. Disable server-to-server SSL by unsetting --ssl_client_ca_certificate.",
]
| apache-2.0 |
suncycheng/intellij-community | python/lib/Lib/SocketServer.py | 70 | 18685 | """Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
save some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneous to apply
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to reqd all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import sys
import os
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
"StreamRequestHandler","DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer","UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- allow_reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self):
"""Handle one request at a time until doomsday."""
while 1:
self.handle_request()
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return True if we should proceed with this request.
"""
return True
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.close_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print '-'*40
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- allow_reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
# Adding a second call to getsockname() because of this issue
# http://wiki.python.org/jython/NewSocketModule#Deferredsocketcreationonjython
self.server_address = self.socket.getsockname()
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
class UDPServer(TCPServer):
"""UDP server class."""
allow_reuse_address = False
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
def close_request(self, request):
# No need to close anything.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
active_children = None
max_children = 40
def collect_children(self):
"""Internal routine to wait for died children."""
while self.active_children:
if len(self.active_children) < self.max_children:
options = os.WNOHANG
else:
# If the maximum number of children are already
# running, block while waiting for a child to exit
options = 0
try:
pid, status = os.waitpid(0, options)
except os.error:
pid = None
if not pid: break
self.active_children.remove(pid)
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
self.collect_children()
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
self.close_request(request)
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
try:
self.finish_request(request, client_address)
self.close_request(request)
except:
self.handle_error(request, client_address)
self.close_request(request)
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
import threading
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
if self.daemon_threads:
t.setDaemon (1)
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
can define arbitrary other instance variariables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
self.setup()
try:
self.handle()
finally:
self.finish()
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which receives the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = 0
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
# XXX Regrettably, I cannot get this working on Linux;
# s.recvfrom() doesn't return a meaningful client address.
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO(self.packet)
self.wfile = StringIO()
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
| apache-2.0 |
kim42083/webm.libwebp | swig/setup.py | 118 | 1200 | #!/usr/bin/python
"""distutils script for libwebp python module."""
from distutils.core import setup
from distutils.extension import Extension
import os
import shutil
import tempfile
tmpdir = tempfile.mkdtemp()
package = "com.google.webp"
package_path = os.path.join(tmpdir, *package.split("."))
os.makedirs(package_path)
# Create __init_.py files along the package path.
initpy_path = tmpdir
for d in package.split("."):
initpy_path = os.path.join(initpy_path, d)
open(os.path.join(initpy_path, "__init__.py"), "w").close()
shutil.copy2("libwebp.py", package_path)
setup(name="libwebp",
version="0.0",
description="libwebp python wrapper",
long_description="Provides access to 'simple' libwebp decode interface",
license="BSD",
url="http://developers.google.com/speed/webp",
ext_package=package,
ext_modules=[Extension("_libwebp",
["libwebp_python_wrap.c"],
libraries=["webp"],
),
],
package_dir={"": tmpdir},
packages=["com", "com.google", "com.google.webp"],
py_modules=[package + ".libwebp"],
)
shutil.rmtree(tmpdir)
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7/Lib/plat-irix5/flp.py | 64 | 13387 | #
# flp - Module to load fl forms from fd files
#
# Jack Jansen, December 1991
#
from warnings import warnpy3k
warnpy3k("the flp module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import string
import os
import sys
import FL
SPLITLINE = '--------------------'
FORMLINE = '=============== FORM ==============='
ENDLINE = '=============================='
class error(Exception):
pass
##################################################################
# Part 1 - The parsing routines #
##################################################################
#
# Externally visible function. Load form.
#
def parse_form(filename, formname):
forms = checkcache(filename)
if forms is None:
forms = parse_forms(filename)
if forms.has_key(formname):
return forms[formname]
else:
raise error, 'No such form in fd file'
#
# Externally visible function. Load all forms.
#
def parse_forms(filename):
forms = checkcache(filename)
if forms is not None: return forms
fp = _open_formfile(filename)
nforms = _parse_fd_header(fp)
forms = {}
for i in range(nforms):
form = _parse_fd_form(fp, None)
forms[form[0].Name] = form
writecache(filename, forms)
return forms
#
# Internal: see if a cached version of the file exists
#
MAGIC = '.fdc'
_internal_cache = {} # Used by frozen scripts only
def checkcache(filename):
if _internal_cache.has_key(filename):
altforms = _internal_cache[filename]
return _unpack_cache(altforms)
import marshal
fp, filename = _open_formfile2(filename)
fp.close()
cachename = filename + 'c'
try:
fp = open(cachename, 'r')
except IOError:
#print 'flp: no cache file', cachename
return None
try:
if fp.read(4) != MAGIC:
print 'flp: bad magic word in cache file', cachename
return None
cache_mtime = rdlong(fp)
file_mtime = getmtime(filename)
if cache_mtime != file_mtime:
#print 'flp: outdated cache file', cachename
return None
#print 'flp: valid cache file', cachename
altforms = marshal.load(fp)
return _unpack_cache(altforms)
finally:
fp.close()
def _unpack_cache(altforms):
forms = {}
for name in altforms.keys():
altobj, altlist = altforms[name]
obj = _newobj()
obj.make(altobj)
list = []
for altobj in altlist:
nobj = _newobj()
nobj.make(altobj)
list.append(nobj)
forms[name] = obj, list
return forms
def rdlong(fp):
s = fp.read(4)
if len(s) != 4: return None
a, b, c, d = s[0], s[1], s[2], s[3]
return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d)
def wrlong(fp, x):
a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff
fp.write(chr(a) + chr(b) + chr(c) + chr(d))
def getmtime(filename):
import os
from stat import ST_MTIME
try:
return os.stat(filename)[ST_MTIME]
except os.error:
return None
#
# Internal: write cached version of the form (parsing is too slow!)
#
def writecache(filename, forms):
import marshal
fp, filename = _open_formfile2(filename)
fp.close()
cachename = filename + 'c'
try:
fp = open(cachename, 'w')
except IOError:
print 'flp: can\'t create cache file', cachename
return # Never mind
fp.write('\0\0\0\0') # Seek back and write MAGIC when done
wrlong(fp, getmtime(filename))
altforms = _pack_cache(forms)
marshal.dump(altforms, fp)
fp.seek(0)
fp.write(MAGIC)
fp.close()
#print 'flp: wrote cache file', cachename
#
# External: print some statements that set up the internal cache.
# This is for use with the "freeze" script. You should call
# flp.freeze(filename) for all forms used by the script, and collect
# the output on a file in a module file named "frozenforms.py". Then
# in the main program of the script import frozenforms.
# (Don't forget to take this out when using the unfrozen version of
# the script!)
#
def freeze(filename):
forms = parse_forms(filename)
altforms = _pack_cache(forms)
print 'import flp'
print 'flp._internal_cache[', repr(filename), '] =', altforms
#
# Internal: create the data structure to be placed in the cache
#
def _pack_cache(forms):
altforms = {}
for name in forms.keys():
obj, list = forms[name]
altobj = obj.__dict__
altlist = []
for obj in list: altlist.append(obj.__dict__)
altforms[name] = altobj, altlist
return altforms
#
# Internal: Locate form file (using PYTHONPATH) and open file
#
def _open_formfile(filename):
return _open_formfile2(filename)[0]
def _open_formfile2(filename):
if filename[-3:] != '.fd':
filename = filename + '.fd'
if filename[0] == '/':
try:
fp = open(filename,'r')
except IOError:
fp = None
else:
for pc in sys.path:
pn = os.path.join(pc, filename)
try:
fp = open(pn, 'r')
filename = pn
break
except IOError:
fp = None
if fp is None:
raise error, 'Cannot find forms file ' + filename
return fp, filename
#
# Internal: parse the fd file header, return number of forms
#
def _parse_fd_header(file):
# First read the magic header line
datum = _parse_1_line(file)
if datum != ('Magic', 12321):
raise error, 'Not a forms definition file'
# Now skip until we know number of forms
while 1:
datum = _parse_1_line(file)
if type(datum) == type(()) and datum[0] == 'Numberofforms':
break
return datum[1]
#
# Internal: parse fd form, or skip if name doesn't match.
# the special value None means 'always parse it'.
#
def _parse_fd_form(file, name):
datum = _parse_1_line(file)
if datum != FORMLINE:
raise error, 'Missing === FORM === line'
form = _parse_object(file)
if form.Name == name or name is None:
objs = []
for j in range(form.Numberofobjects):
obj = _parse_object(file)
objs.append(obj)
return (form, objs)
else:
for j in range(form.Numberofobjects):
_skip_object(file)
return None
#
# Internal class: a convenient place to store object info fields
#
class _newobj:
def add(self, name, value):
self.__dict__[name] = value
def make(self, dict):
for name in dict.keys():
self.add(name, dict[name])
#
# Internal parsing routines.
#
def _parse_string(str):
if '\\' in str:
s = '\'' + str + '\''
try:
return eval(s)
except:
pass
return str
def _parse_num(str):
return eval(str)
def _parse_numlist(str):
slist = string.split(str)
nlist = []
for i in slist:
nlist.append(_parse_num(i))
return nlist
# This dictionary maps item names to parsing routines.
# If no routine is given '_parse_num' is default.
_parse_func = { \
'Name': _parse_string, \
'Box': _parse_numlist, \
'Colors': _parse_numlist, \
'Label': _parse_string, \
'Name': _parse_string, \
'Callback': _parse_string, \
'Argument': _parse_string }
# This function parses a line, and returns either
# a string or a tuple (name,value)
import re
prog = re.compile('^([^:]*): *(.*)')
def _parse_line(line):
match = prog.match(line)
if not match:
return line
name, value = match.group(1, 2)
if name[0] == 'N':
name = string.join(string.split(name),'')
name = string.lower(name)
name = string.capitalize(name)
try:
pf = _parse_func[name]
except KeyError:
pf = _parse_num
value = pf(value)
return (name, value)
def _readline(file):
line = file.readline()
if not line:
raise EOFError
return line[:-1]
def _parse_1_line(file):
line = _readline(file)
while line == '':
line = _readline(file)
return _parse_line(line)
def _skip_object(file):
line = ''
while not line in (SPLITLINE, FORMLINE, ENDLINE):
pos = file.tell()
line = _readline(file)
if line == FORMLINE:
file.seek(pos)
def _parse_object(file):
obj = _newobj()
while 1:
pos = file.tell()
datum = _parse_1_line(file)
if datum in (SPLITLINE, FORMLINE, ENDLINE):
if datum == FORMLINE:
file.seek(pos)
return obj
if type(datum) is not type(()) or len(datum) != 2:
raise error, 'Parse error, illegal line in object: '+datum
obj.add(datum[0], datum[1])
#################################################################
# Part 2 - High-level object/form creation routines #
#################################################################
#
# External - Create a form an link to an instance variable.
#
def create_full_form(inst, (fdata, odatalist)):
form = create_form(fdata)
exec 'inst.'+fdata.Name+' = form\n'
for odata in odatalist:
create_object_instance(inst, form, odata)
#
# External - Merge a form into an existing form in an instance
# variable.
#
def merge_full_form(inst, form, (fdata, odatalist)):
exec 'inst.'+fdata.Name+' = form\n'
if odatalist[0].Class != FL.BOX:
raise error, 'merge_full_form() expects FL.BOX as first obj'
for odata in odatalist[1:]:
create_object_instance(inst, form, odata)
#################################################################
# Part 3 - Low-level object/form creation routines #
#################################################################
#
# External Create_form - Create form from parameters
#
def create_form(fdata):
import fl
return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
#
# External create_object - Create an object. Make sure there are
# no callbacks. Returns the object created.
#
def create_object(form, odata):
obj = _create_object(form, odata)
if odata.Callback:
raise error, 'Creating free object with callback'
return obj
#
# External create_object_instance - Create object in an instance.
#
def create_object_instance(inst, form, odata):
obj = _create_object(form, odata)
if odata.Callback:
cbfunc = eval('inst.'+odata.Callback)
obj.set_call_back(cbfunc, odata.Argument)
if odata.Name:
exec 'inst.' + odata.Name + ' = obj\n'
#
# Internal _create_object: Create the object and fill options
#
def _create_object(form, odata):
crfunc = _select_crfunc(form, odata.Class)
obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \
odata.Box[3], odata.Label)
if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
obj.boxtype = odata.Boxtype
obj.col1 = odata.Colors[0]
obj.col2 = odata.Colors[1]
obj.align = odata.Alignment
obj.lstyle = odata.Style
obj.lsize = odata.Size
obj.lcol = odata.Lcol
return obj
#
# Internal crfunc: helper function that returns correct create function
#
def _select_crfunc(fm, cl):
if cl == FL.BEGIN_GROUP: return fm.bgn_group
elif cl == FL.END_GROUP: return fm.end_group
elif cl == FL.BITMAP: return fm.add_bitmap
elif cl == FL.BOX: return fm.add_box
elif cl == FL.BROWSER: return fm.add_browser
elif cl == FL.BUTTON: return fm.add_button
elif cl == FL.CHART: return fm.add_chart
elif cl == FL.CHOICE: return fm.add_choice
elif cl == FL.CLOCK: return fm.add_clock
elif cl == FL.COUNTER: return fm.add_counter
elif cl == FL.DIAL: return fm.add_dial
elif cl == FL.FREE: return fm.add_free
elif cl == FL.INPUT: return fm.add_input
elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
elif cl == FL.MENU: return fm.add_menu
elif cl == FL.POSITIONER: return fm.add_positioner
elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
elif cl == FL.SLIDER: return fm.add_slider
elif cl == FL.VALSLIDER: return fm.add_valslider
elif cl == FL.TEXT: return fm.add_text
elif cl == FL.TIMER: return fm.add_timer
else:
raise error, 'Unknown object type: %r' % (cl,)
def test():
import time
t0 = time.time()
if len(sys.argv) == 2:
forms = parse_forms(sys.argv[1])
t1 = time.time()
print 'parse time:', 0.001*(t1-t0), 'sec.'
keys = forms.keys()
keys.sort()
for i in keys:
_printform(forms[i])
elif len(sys.argv) == 3:
form = parse_form(sys.argv[1], sys.argv[2])
t1 = time.time()
print 'parse time:', round(t1-t0, 3), 'sec.'
_printform(form)
else:
print 'Usage: test fdfile [form]'
def _printform(form):
f = form[0]
objs = form[1]
print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
for i in objs:
print ' Obj ', i.Name, ' type ', i.Class, i.Type
print ' Box ', i.Box, ' btype ', i.Boxtype
print ' Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
print ' cols ', i.Colors
print ' cback ', i.Callback, i.Argument
| mit |
tiagochiavericosta/edx-platform | common/djangoapps/performance/views/__init__.py | 100 | 1765 | import datetime
import json
import logging
from django.http import HttpResponse
from track.utils import DateTimeJSONEncoder
perflog = logging.getLogger("perflog")
def _get_request_header(request, header_name, default=''):
"""Helper method to get header values from a request's META dict, if present."""
if request is not None and hasattr(request, 'META') and header_name in request.META:
return request.META[header_name]
else:
return default
def _get_request_value(request, value_name, default=''):
"""Helper method to get header values from a request's REQUEST dict, if present."""
if request is not None and hasattr(request, 'REQUEST') and value_name in request.REQUEST:
return request.REQUEST[value_name]
else:
return default
def performance_log(request):
"""
Log when POST call to "performance" URL is made by a user.
Request should provide "event" and "page" arguments.
"""
event = {
"ip": _get_request_header(request, 'REMOTE_ADDR'),
"referer": _get_request_header(request, 'HTTP_REFERER'),
"accept_language": _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE'),
"event_source": "browser",
"event": _get_request_value(request, 'event'),
"agent": _get_request_header(request, 'HTTP_USER_AGENT'),
"page": _get_request_value(request, 'page'),
"id": _get_request_value(request, 'id'),
"expgroup": _get_request_value(request, 'expgroup'),
"value": _get_request_value(request, 'value'),
"time": datetime.datetime.utcnow(),
"host": _get_request_header(request, 'SERVER_NAME'),
}
perflog.info(json.dumps(event, cls=DateTimeJSONEncoder))
return HttpResponse(status=204)
| agpl-3.0 |
lepistone/purchase-workflow | vendor_consignment_stock/model/purchase_order.py | 25 | 1157 | # -*- coding: utf-8 -*-
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import models, fields, api
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
is_vci = fields.Boolean('Vendor Consignment Inventory')
@api.multi
def has_stockable_product(self):
self.ensure_one()
if self.is_vci:
return False
else:
return super(PurchaseOrder, self).has_stockable_product()
| agpl-3.0 |
ptisserand/ansible | test/units/modules/network/nso/test_nso_query.py | 41 | 2148 | #
# Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from ansible.compat.tests.mock import patch
from ansible.modules.network.nso import nso_query
from . import nso_module
from .nso_module import MockResponse
class TestNsoQuery(nso_module.TestNsoModule):
module = nso_query
@patch('ansible.module_utils.network.nso.nso.open_url')
def test_nso_query(self, open_url_mock):
xpath = '/packages/package'
fields = ['name', 'package-version']
calls = [
MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5"}'),
MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'),
MockResponse('query',
{'xpath_expr': xpath, 'selection': fields}, 200,
'{"result": {"results": [["test", "1.0"]]}}'),
MockResponse('logout', {}, 200, '{"result": {}}'),
]
open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)
nso_module.set_module_args({
'username': 'user', 'password': 'password',
'url': 'http://localhost:8080/jsonrpc',
'xpath': xpath,
'fields': fields
})
self.execute_module(changed=False, output=[["test", "1.0"]])
self.assertEqual(0, len(calls))
| gpl-3.0 |
Zouyiran/ryu | ryu/services/protocols/bgp/operator/views/fields.py | 38 | 1875 | import importlib
import inspect
class Field(object):
def __init__(self, field_name):
self.field_name = field_name
def get(self, obj):
return getattr(obj, self.field_name)
class RelatedViewField(Field):
def __init__(self, field_name, operator_view_class):
super(RelatedViewField, self).__init__(field_name)
self.__operator_view_class = operator_view_class
@property
def _operator_view_class(self):
if inspect.isclass(self.__operator_view_class):
return self.__operator_view_class
elif isinstance(self.__operator_view_class, basestring):
try:
module_name, class_name =\
self.__operator_view_class.rsplit('.', 1)
return class_for_name(module_name, class_name)
except (AttributeError, ValueError, ImportError):
raise WrongOperatorViewClassError(
'There is no "%s" class' % self.__operator_view_class
)
def retrieve_and_wrap(self, obj):
related_obj = self.get(obj)
return self.wrap(related_obj)
def wrap(self, obj):
return self._operator_view_class(obj)
class RelatedListViewField(RelatedViewField):
pass
class RelatedDictViewField(RelatedViewField):
pass
class DataField(Field):
pass
class OptionalDataField(DataField):
def get(self, obj):
if hasattr(obj, self.field_name):
return getattr(obj, self.field_name)
else:
return None
class WrongOperatorViewClassError(Exception):
pass
def class_for_name(module_name, class_name):
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
c = getattr(m, class_name)
return c
| apache-2.0 |
lawzou/shoop | shoop/simple_supplier/migrations/0001_initial.py | 8 | 2221 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import shoop.core.fields
class Migration(migrations.Migration):
dependencies = [
('shoop', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='StockAdjustment',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('created_on', models.DateTimeField(db_index=True, auto_now_add=True)),
('delta', shoop.core.fields.QuantityField(default=0, decimal_places=9, max_digits=36)),
('created_by', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('product', models.ForeignKey(to='shoop.Product', related_name='+')),
('supplier', models.ForeignKey(to='shoop.Supplier')),
],
),
migrations.CreateModel(
name='StockCount',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('logical_count', shoop.core.fields.QuantityField(default=0, editable=False, decimal_places=9, max_digits=36)),
('physical_count', shoop.core.fields.QuantityField(default=0, editable=False, decimal_places=9, max_digits=36)),
('product', models.ForeignKey(editable=False, to='shoop.Product', related_name='+')),
('supplier', models.ForeignKey(to='shoop.Supplier', editable=False)),
],
),
migrations.AlterUniqueTogether(
name='stockcount',
unique_together=set([('product', 'supplier')]),
),
migrations.AlterUniqueTogether(
name='stockadjustment',
unique_together=set([('product', 'supplier')]),
),
]
| agpl-3.0 |
beeftornado/sentry | src/sentry_plugins/asana/plugin.py | 1 | 9106 | from __future__ import absolute_import
import six
from django.conf.urls import url
from rest_framework.response import Response
from sentry.exceptions import PluginError, PluginIdentityRequired
from sentry.plugins.bases.issue2 import IssuePlugin2, IssueGroupActionEndpoint
from sentry.utils.http import absolute_uri
from sentry.integrations import FeatureDescription, IntegrationFeatures
from sentry_plugins.base import CorePluginMixin
from .client import AsanaClient
ERR_AUTH_NOT_CONFIGURED = "You still need to associate an Asana identity with this account."
DESCRIPTION = """
Improve your productivity by creating tasks in Asana directly
from Sentry issues. This integration also allows you to link Sentry
issues to existing tasks in Asana.
"""
class AsanaPlugin(CorePluginMixin, IssuePlugin2):
description = DESCRIPTION
slug = "asana"
title = "Asana"
conf_title = title
conf_key = "asana"
auth_provider = "asana"
required_field = "workspace"
feature_descriptions = [
FeatureDescription(
"""
Create and link Sentry issue groups directly to an Asana ticket in any of your
projects, providing a quick way to jump from a Sentry bug to tracked ticket!
""",
IntegrationFeatures.ISSUE_BASIC,
),
FeatureDescription(
"""
Link Sentry issues to existing Asana tickets.
""",
IntegrationFeatures.ISSUE_BASIC,
),
]
def get_group_urls(self):
return super(AsanaPlugin, self).get_group_urls() + [
url(
r"^autocomplete",
IssueGroupActionEndpoint.as_view(view_method_name="view_autocomplete", plugin=self),
)
]
def is_configured(self, request, project, **kwargs):
return bool(self.get_option("workspace", project))
def has_workspace_access(self, workspace, choices):
for c, _ in choices:
if workspace == c:
return True
return False
def get_workspace_choices(self, workspaces):
return [(w["gid"], w["name"]) for w in workspaces["data"]]
def get_new_issue_fields(self, request, group, event, **kwargs):
fields = super(AsanaPlugin, self).get_new_issue_fields(request, group, event, **kwargs)
client = self.get_client(request.user)
workspaces = client.get_workspaces()
workspace_choices = self.get_workspace_choices(workspaces)
workspace = self.get_option("workspace", group.project)
if workspace and not self.has_workspace_access(workspace, workspace_choices):
workspace_choices.append((workspace, workspace))
# use labels that are more applicable to asana
for field in fields:
if field["name"] == "title":
field["label"] = "Name"
if field["name"] == "description":
field["label"] = "Notes"
field["required"] = False
return (
[
{
"name": "workspace",
"label": "Asana Workspace",
"default": workspace,
"type": "select",
"choices": workspace_choices,
"readonly": True,
}
]
+ fields
+ [
{
"name": "project",
"label": "Project",
"type": "select",
"has_autocomplete": True,
"required": False,
"placeholder": "Start typing to search for a project",
},
{
"name": "assignee",
"label": "Assignee",
"type": "select",
"has_autocomplete": True,
"required": False,
"placeholder": "Start typing to search for a user",
},
]
)
def get_link_existing_issue_fields(self, request, group, event, **kwargs):
return [
{
"name": "issue_id",
"label": "Task",
"default": "",
"type": "select",
"has_autocomplete": True,
},
{
"name": "comment",
"label": "Comment",
"default": absolute_uri(
group.get_absolute_url(params={"referrer": "asana_plugin"})
),
"type": "textarea",
"help": ("Leave blank if you don't want to " "add a comment to the Asana issue."),
"required": False,
},
]
def get_client(self, user):
auth = self.get_auth_for_user(user=user)
if auth is None:
raise PluginIdentityRequired(ERR_AUTH_NOT_CONFIGURED)
return AsanaClient(auth=auth)
def error_message_from_json(self, data):
errors = data.get("errors")
if errors:
return " ".join([e["message"] for e in errors])
return "unknown error"
def create_issue(self, request, group, form_data, **kwargs):
client = self.get_client(request.user)
try:
response = client.create_issue(
workspace=self.get_option("workspace", group.project), data=form_data
)
except Exception as e:
self.raise_error(e, identity=client.auth)
return response["data"]["gid"]
def link_issue(self, request, group, form_data, **kwargs):
client = self.get_client(request.user)
try:
issue = client.get_issue(issue_id=form_data["issue_id"])["data"]
except Exception as e:
self.raise_error(e, identity=client.auth)
comment = form_data.get("comment")
if comment:
try:
client.create_comment(issue["gid"], {"text": comment})
except Exception as e:
self.raise_error(e, identity=client.auth)
return {"title": issue["name"]}
def get_issue_label(self, group, issue_id, **kwargs):
return "Asana Issue"
def get_issue_url(self, group, issue_id, **kwargs):
return "https://app.asana.com/0/0/%s" % issue_id
def validate_config(self, project, config, actor):
"""
```
if config['foo'] and not config['bar']:
raise PluginError('You cannot configure foo with bar')
return config
```
"""
try:
int(config["workspace"])
except ValueError as exc:
self.logger.exception(six.text_type(exc))
raise PluginError("Non-numeric workspace value")
return config
def get_config(self, *args, **kwargs):
user = kwargs["user"]
try:
client = self.get_client(user)
except PluginIdentityRequired as e:
self.raise_error(e)
workspaces = client.get_workspaces()
workspace_choices = self.get_workspace_choices(workspaces)
workspace = self.get_option("workspace", kwargs["project"])
# check to make sure the current user has access to the workspace
helptext = None
if workspace and not self.has_workspace_access(workspace, workspace_choices):
workspace_choices.append((workspace, workspace))
helptext = (
"This plugin has been configured for an Asana workspace "
"that either you don't have access to or doesn't "
"exist. You can edit the configuration, but you will not "
"be able to change it back to the current configuration "
"unless a teammate grants you access to the workspace in Asana."
)
return [
{
"name": "workspace",
"label": "Workspace",
"type": "select",
"choices": workspace_choices,
"default": workspace or workspaces["data"][0]["gid"],
"help": helptext,
}
]
def view_autocomplete(self, request, group, **kwargs):
field = request.GET.get("autocomplete_field")
query = request.GET.get("autocomplete_query")
client = self.get_client(request.user)
workspace = self.get_option("workspace", group.project)
results = []
field_name = field
if field == "issue_id":
field_name = "task"
elif field == "assignee":
field_name = "user"
try:
response = client.search(workspace, field_name, query.encode("utf-8"))
except Exception as e:
return Response(
{"error_type": "validation", "errors": [{"__all__": self.message_from_error(e)}]},
status=400,
)
else:
results = [
{"text": "(#%s) %s" % (i["gid"], i["name"]), "id": i["gid"]}
for i in response.get("data", [])
]
return Response({field: results})
| bsd-3-clause |
lociii/googleads-python-lib | examples/adspygoogle/adwords/v201306/account_management/get_account_changes.py | 3 | 3874 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets the changes in the account during the last 24 hours.
Tags: CustomerSyncService.get
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import datetime
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
def main(client):
# Initialize appropriate service.
customer_sync_service = client.GetCustomerSyncService(version='v201306')
campaign_service = client.GetCampaignService(version='v201306')
# Construct selector and get all campaigns.
selector = {
'fields': ['Id', 'Name', 'Status']
}
campaigns = campaign_service.Get(selector)[0]
campaign_ids = []
if 'entries' in campaigns:
for campaign in campaigns['entries']:
campaign_ids.append(campaign['id'])
else:
print 'No campaigns were found.'
sys.exit(0)
# Construct selector and get all changes.
today = datetime.datetime.today()
yesterday = today - datetime.timedelta(1)
selector = {
'dateTimeRange': {
'min': yesterday.strftime('%Y%m%d %H%M%S'),
'max': today.strftime('%Y%m%d %H%M%S')
},
'campaignIds': campaign_ids
}
account_changes = customer_sync_service.Get(selector)[0]
# Display results.
if account_changes:
if 'lastChangeTimestamp' in account_changes:
print 'Most recent changes: %s' % account_changes['lastChangeTimestamp']
if account_changes['changedCampaigns']:
for data in account_changes['changedCampaigns']:
print ('Campaign with id \'%s\' has change status \'%s\'.'
% (data['campaignId'], data['campaignChangeStatus']))
if (data['campaignChangeStatus'] != 'NEW' and
data['campaignChangeStatus'] != 'FIELDS_UNCHANGED'):
print ' Added ad extensions: %s' % data.get('addedAdExtensions')
print ' Deleted ad extensions: %s' % data.get('deletedAdExtensions')
print (' Added campaign criteria: %s'
% data.get('addedCampaignCriteria'))
print (' Deleted campaign criteria: %s'
% data.get('deletedCampaignCriteria'))
print (' Campaign targeting changed: %s'
% data.get('campaignTargetingChanged'))
if data.get('changedAdGroups'):
for ad_group_data in data['changedAdGroups']:
print (' Ad group with id \'%s\' has change status \'%s\'.'
% (ad_group_data['adGroupId'],
ad_group_data['adGroupChangeStatus']))
if ad_group_data['adGroupChangeStatus'] != 'NEW':
print ' Changed ads: %s' % ad_group_data['changedAds']
print (' Changed criteria: %s'
% ad_group_data['changedCriteria'])
print (' Deleted criteria: %s'
% ad_group_data['deletedCriteria'])
else:
print 'No changes were found.'
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client)
| apache-2.0 |
rhuss/bazel | tools/build_defs/docker/rewrite_json_test.py | 18 | 17487 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for rewrite_json."""
import unittest
from tools.build_defs.docker.rewrite_json import _DOCKER_VERSION
from tools.build_defs.docker.rewrite_json import _OPERATING_SYSTEM
from tools.build_defs.docker.rewrite_json import _PROCESSOR_ARCHITECTURE
from tools.build_defs.docker.rewrite_json import MetadataOptions
from tools.build_defs.docker.rewrite_json import RewriteMetadata
class RewriteJsonTest(unittest.TestCase):
"""Testing for rewrite_json."""
def testNewEntrypoint(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
entrypoint = ['/bin/bash']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': entrypoint
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, parent=parent))
self.assertEquals(expected, actual)
def testOverrideEntrypoint(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/sh', 'does', 'not', 'matter'],
}
}
name = 'deadbeef'
parent = 'blah'
entrypoint = ['/bin/bash']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': entrypoint
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, parent=parent))
self.assertEquals(expected, actual)
def testNewCmd(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
}
}
name = 'deadbeef'
parent = 'blah'
cmd = ['/bin/bash']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
'Cmd': cmd
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, cmd=cmd, parent=parent))
self.assertEquals(expected, actual)
def testOverrideCmd(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
'Cmd': ['does', 'not', 'matter'],
}
}
name = 'deadbeef'
parent = 'blah'
cmd = ['does', 'matter']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
'Cmd': cmd
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, cmd=cmd, parent=parent))
self.assertEquals(expected, actual)
def testOverrideBoth(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/sh'],
'Cmd': ['does', 'not', 'matter'],
}
}
name = 'deadbeef'
parent = 'blah'
entrypoint = ['/bin/bash', '-c']
cmd = ['my-command', 'my-arg1', 'my-arg2']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': entrypoint,
'Cmd': cmd
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, cmd=cmd, parent=parent))
self.assertEquals(expected, actual)
def testOverrideParent(self):
name = 'me!'
parent = 'parent'
# In the typical case, we expect the parent to
# come in as the 'id', and our grandparent to
# be its 'parent'.
in_data = {
'id': parent,
'parent': 'grandparent',
}
expected = {
'id': name,
'parent': parent,
'config': {},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent))
self.assertEquals(expected, actual)
def testNewSize(self):
# Size is one of the few fields that, when omitted,
# should be removed.
in_data = {
'id': 'you',
'Size': '124',
}
name = 'me'
parent = 'blah'
size = '4321'
expected = {
'id': name,
'parent': parent,
'Size': size,
'config': {},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, size=size, parent=parent))
self.assertEquals(expected, actual)
def testOmitSize(self):
# Size is one of the few fields that, when omitted,
# should be removed.
in_data = {
'id': 'you',
'Size': '124',
}
name = 'me'
parent = 'blah'
expected = {
'id': name,
'parent': parent,
'config': {},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent))
self.assertEquals(expected, actual)
def testOmitName(self):
# Name is required.
with self.assertRaises(Exception):
RewriteMetadata({}, MetadataOptions(name=None))
def testStripContainerConfig(self):
# Size is one of the few fields that, when omitted,
# should be removed.
in_data = {
'id': 'you',
'container_config': {},
}
name = 'me'
parent = 'blah'
expected = {
'id': name,
'parent': parent,
'config': {},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent))
self.assertEquals(expected, actual)
def testEmptyBase(self):
in_data = {}
name = 'deadbeef'
entrypoint = ['/bin/bash', '-c']
cmd = ['my-command', 'my-arg1', 'my-arg2']
size = '999'
expected = {
'id': name,
'config': {
'Entrypoint': entrypoint,
'Cmd': cmd,
'ExposedPorts': {
'80/tcp': {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
'Size': size,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, cmd=cmd, size=size,
ports=['80']))
self.assertEquals(expected, actual)
def testOmitParentWithBase(self):
# Our input data should be empty when parent is omitted
in_data = {
'id': 'you',
}
with self.assertRaises(Exception):
RewriteMetadata(in_data, MetadataOptions(name='me'))
def testNewPort(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
port = '80'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
port + '/tcp': {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, ports=[port]))
self.assertEquals(expected, actual)
def testAugmentPort(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
'443/tcp': {}
}
}
}
name = 'deadbeef'
parent = 'blah'
port = '80'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
'443/tcp': {},
port + '/tcp': {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, ports=[port]))
self.assertEquals(expected, actual)
def testMultiplePorts(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
port1 = '80'
port2 = '8080'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
port1 + '/tcp': {},
port2 + '/tcp': {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, ports=[port1, port2]))
self.assertEquals(expected, actual)
def testPortCollision(self):
port = '80'
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
port + '/tcp': {}
}
}
}
name = 'deadbeef'
parent = 'blah'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
port + '/tcp': {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, ports=[port]))
self.assertEquals(expected, actual)
def testPortWithProtocol(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
port = '80/tcp'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'ExposedPorts': {
port: {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, ports=[port]))
self.assertEquals(expected, actual)
def testNewVolume(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
volume = '/logs'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Volumes': {
volume: {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, volumes=[volume]))
self.assertEquals(expected, actual)
def testAugmentVolume(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Volumes': {
'/original': {}
}
}
}
name = 'deadbeef'
parent = 'blah'
volume = '/data'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Volumes': {
'/original': {},
volume: {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, volumes=[volume]))
self.assertEquals(expected, actual)
def testMultipleVolumes(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
volume1 = '/input'
volume2 = '/output'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Volumes': {
volume1: {},
volume2: {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, volumes=[volume1, volume2]))
self.assertEquals(expected, actual)
def testEnv(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
env = [
'baz=blah',
'foo=bar',
]
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Env': env,
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, env=env, parent=parent))
self.assertEquals(expected, actual)
def testEnvResolveReplace(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Env': [
'foo=bar',
'baz=blah',
'blah=still around',
],
}
}
name = 'deadbeef'
parent = 'blah'
env = [
'baz=replacement',
'foo=$foo:asdf',
]
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Env': [
'baz=replacement',
'blah=still around',
'foo=bar:asdf',
],
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, env=env, parent=parent))
self.assertEquals(expected, actual)
def testAugmentVolumeWithNullInput(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Volumes': None,
}
}
name = 'deadbeef'
parent = 'blah'
volume = '/data'
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Volumes': {
volume: {}
}
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent, volumes=[volume]))
self.assertEquals(expected, actual)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kampanita/pelisalacarta | python/main-classic/core/config.py | 1 | 12081 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# Parámetros de configuración (kodi)
# ------------------------------------------------------------
import os
import xbmc
import xbmcaddon
PLATFORM_NAME = "kodi-jarvis"
PLUGIN_NAME = "pelisalacarta"

# Addon handle and localized-string lookup for plugin.video.pelisalacarta.
__settings__ = xbmcaddon.Addon(id="plugin.video." + PLUGIN_NAME)
__language__ = __settings__.getLocalizedString
def get_platform():
    """Return the identifier of this build target ("kodi-jarvis")."""
    return PLATFORM_NAME
def is_xbmc():
    """Return True: this config module is the Kodi/XBMC implementation."""
    return True
def get_library_support():
    """Return True: library integration is available on this platform."""
    return True
def get_system_platform():
    """Return the operating system Kodi/XBMC is running on.

    One of "linux", "xbox", "windows" or "osx"; "unknown" when none of the
    Kodi condition checks match.
    """
    # Fix: the redundant function-local `import xbmc` was dropped — the
    # module is already imported at file level.
    platform = "unknown"
    if xbmc.getCondVisibility("system.platform.linux"):
        platform = "linux"
    elif xbmc.getCondVisibility("system.platform.xbox"):
        platform = "xbox"
    elif xbmc.getCondVisibility("system.platform.windows"):
        platform = "windows"
    elif xbmc.getCondVisibility("system.platform.osx"):
        platform = "osx"
    return platform
def open_settings():
    """Open the addon settings dialog in Kodi."""
    __settings__.openSettings()
def get_setting(name, channel=""):
    """Return the value of configuration parameter *name*.

    Without *channel*, the value comes from the addon's global settings.
    With *channel*, the channel's own settings file
    (settings_channels/<channel>_data.json, created from the channel XML if
    missing) is consulted first; when the parameter is absent there, the
    global value is returned (empty string if unset everywhere).
    """
    if not channel:
        # No channel given: straight global lookup.
        return __settings__.getSetting(name)

    from core import channeltools
    channel_value = channeltools.get_channel_setting(name, channel)
    if channel_value is not None:
        return channel_value
    # Not defined for this channel: fall back to the global setting.
    return __settings__.getSetting(name)
def set_setting(name, value, channel=""):
    """Set configuration parameter *name* to *value*.

    With *channel*, the value is stored in that channel's settings file
    (settings_channels/<channel>_data.json, created from the channel XML if
    missing); otherwise it is stored in the global addon settings.

    Returns the stored value, or None when saving failed.
    """
    if channel:
        from core import channeltools
        return channeltools.set_channel_setting(name, value, channel)
    else:
        try:
            __settings__.setSetting(name, value)
        except Exception:
            # Bug fix: `logger` was referenced here without ever being
            # imported in this module, so any setSetting failure raised a
            # NameError instead of being reported.
            import logger
            logger.info("[config.py] ERROR al fijar el parametro global {0}= {1}".format(name, value))
            return None
        return value
def get_localized_string(code):
    """Return the localized string for *code*, UTF-8 encoded when possible."""
    text = __language__(code)
    try:
        return text.encode("utf-8")
    except:
        # Encoding failed (e.g. already bytes): hand back the raw value.
        return text
def get_library_path():
    """Return the library folder: the 'librarypath' setting when set,
    otherwise a platform-dependent default location."""
    if get_system_platform() == "xbox":
        default = xbmc.translatePath(os.path.join(get_runtime_path(), "library"))
    else:
        default = xbmc.translatePath("special://profile/addon_data/plugin.video." +
                                     PLUGIN_NAME + "/library")
    configured = get_setting("librarypath")
    return configured if configured != "" else default
def get_temp_file(filename):
    """Return the absolute path of *filename* inside Kodi's temp folder."""
    return xbmc.translatePath(os.path.join("special://temp/", filename))
def get_runtime_path():
    """Return the addon installation directory (translated from special://)."""
    return xbmc.translatePath(__settings__.getAddonInfo('Path'))
def get_data_path():
    """Return the addon profile (user data) folder, creating it if missing."""
    profile_dir = xbmc.translatePath(__settings__.getAddonInfo('Profile'))
    # XBMC4XBOX does not create the profile directory automatically.
    if not os.path.exists(profile_dir):
        os.makedirs(profile_dir)
    return profile_dir
def get_cookie_data():
    """Return the raw contents of the addon's cookies.dat file.

    Raises IOError/OSError when the file does not exist yet.
    """
    # Fix: redundant function-local `import os` removed (imported at module
    # level) and the file handle is now closed even if read() fails.
    ficherocookies = os.path.join(get_data_path(), 'cookies.dat')
    with open(ficherocookies, 'r') as cookiedatafile:
        cookiedata = cookiedatafile.read()
    return cookiedata
# Test if all the required directories are created
def verify_directories_created():
    """Ensure every folder the addon relies on exists, creating missing ones.

    Also forces sensible defaults for the download / download-list / bookmark
    path settings when they are empty. Directory creation is best effort:
    failures are silently ignored and smb:// shares are never created locally.
    """
    import logger
    logger.info("pelisalacarta.core.config.verify_directories_created")

    # Force download path if empty
    download_path = get_setting("downloadpath")
    if download_path == "":
        if is_xbmc():
            download_path_special = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads"
            set_setting("downloadpath", download_path_special)
        else:
            download_path = os.path.join(get_data_path(), "downloads")
            set_setting("downloadpath", download_path)

    # Force download list path if empty
    download_list_path = get_setting("downloadlistpath")
    if download_list_path == "":
        if is_xbmc():
            download_list_path_special = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads/list"
            set_setting("downloadlistpath", download_list_path_special)
        else:
            download_list_path = os.path.join(get_data_path(), "downloads", "list")
            set_setting("downloadlistpath", download_list_path)

    # Force bookmark path if empty
    bookmark_path = get_setting("bookmarkpath")
    if bookmark_path == "":
        if is_xbmc():
            bookmark_path_special = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads/list"
            set_setting("bookmarkpath", bookmark_path_special)
        else:
            bookmark_path = os.path.join(get_data_path(), "bookmarks")
            set_setting("bookmarkpath", bookmark_path)

    # Create data_path if not exists
    if not os.path.exists(get_data_path()):
        logger.debug("Creating data_path " + get_data_path())
        try:
            os.mkdir(get_data_path())
        except:
            pass

    if is_xbmc():
        logger.info("Es una plataforma XBMC")

        # Translate each special:// path before creating it.
        if download_path.startswith("special://"):
            download_path = xbmc.translatePath(download_path)
            _create_folder(download_path,
                           "Creating download_path (from special): " + download_path)
        else:
            _create_folder(download_path, "Creating download_path: " + download_path)

        if download_list_path.startswith("special://"):
            download_list_path = xbmc.translatePath(download_list_path)
            _create_folder(download_list_path,
                           "Creating download_list_path (from special): " + download_list_path)
        else:
            _create_folder(download_list_path,
                           "Creating download_list_path: " + download_list_path)

        if bookmark_path.startswith("special://"):
            bookmark_path = xbmc.translatePath(bookmark_path)
            _create_folder(bookmark_path,
                           "Creating bookmark_path (from special): " + bookmark_path)
        else:
            _create_folder(bookmark_path, "Creating bookmark_path: " + bookmark_path)

    else:
        logger.info("No es una plataforma XBMC")
        _create_folder(download_path, "Creating download_path " + download_path)
        _create_folder(download_list_path,
                       "Creating download_list_path " + download_list_path)
        _create_folder(bookmark_path, "Creating bookmark_path " + bookmark_path)

    # Create library_path if not exists
    _create_folder(get_library_path(), "Creating library_path " + get_library_path())

    # Create settings_path if not exists
    settings_path = os.path.join(get_data_path(), "settings_channels")
    if not os.path.exists(settings_path):
        logger.debug("Creating settings_path " + settings_path)
        try:
            os.mkdir(settings_path)
        except:
            pass

    # Checks that a directory "xbmc" is not present on platformcode
    old_xbmc_directory = os.path.join(get_runtime_path(), "platformcode", "xbmc")
    if os.path.exists(old_xbmc_directory):
        logger.debug("Removing old platformcode.xbmc directory")
        try:
            import shutil
            shutil.rmtree(old_xbmc_directory)
        except:
            pass


def _create_folder(path, debug_message):
    """Best-effort os.mkdir(*path*).

    Skipped for smb:// shares and for paths that already exist; any mkdir
    failure is swallowed (matches the historical behaviour of this module).
    Replaces ten nearly identical copy-pasted blocks.
    """
    import logger
    if not path.lower().startswith("smb") and not os.path.exists(path):
        logger.debug(debug_message)
        try:
            os.mkdir(path)
        except:
            pass
| gpl-3.0 |
nateprewitt/pipenv | pipenv/patched/pip/req/req_file.py | 343 | 11926 | """
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import sys
import optparse
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils.deprecation import RemovedInPip10Warning
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')

# Options that may appear on a requirements-file line and act globally
# (they configure the finder / parsing rather than a single requirement).
SUPPORTED_OPTIONS = [
    cmdoptions.constraints,
    cmdoptions.editable,
    cmdoptions.requirements,
    cmdoptions.no_index,
    cmdoptions.index_url,
    cmdoptions.find_links,
    cmdoptions.extra_index_url,
    cmdoptions.allow_external,
    cmdoptions.allow_all_external,
    cmdoptions.no_allow_external,
    cmdoptions.allow_unsafe,
    cmdoptions.no_allow_unsafe,
    cmdoptions.use_wheel,
    cmdoptions.no_use_wheel,
    cmdoptions.always_unzip,
    cmdoptions.no_binary,
    cmdoptions.only_binary,
    cmdoptions.pre,
    cmdoptions.process_dependency_links,
    cmdoptions.trusted_host,
    cmdoptions.require_hashes,
]

# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
    cmdoptions.install_options,
    cmdoptions.global_options,
    cmdoptions.hash,
]

# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None, constraint=False, wheel_cache=None):
    """Yield InstallRequirement instances parsed from a requirements file.

    :param filename: Path or url of requirements file.
    :param finder: Instance of pip.index.PackageFinder.
    :param comes_from: Origin description of requirements.
    :param options: cli options.
    :param session: Instance of pip.download.PipSession.
    :param constraint: If true, parsing a constraint file rather than
        requirements file.
    :param wheel_cache: Instance of pip.wheel.WheelCache
    """
    if session is None:
        raise TypeError(
            "parse_requirements() missing 1 required keyword argument: "
            "'session'"
        )

    _, content = get_file_content(
        filename, comes_from=comes_from, session=session
    )

    for line_number, line in preprocess(content, options):
        for req in process_line(line, filename, line_number, finder,
                                comes_from, options, session, wheel_cache,
                                constraint=constraint):
            yield req
def preprocess(content, options):
    """Return a (line_number, line) iterator over *content* with continuation
    lines joined, comments stripped, and --skip-requirements-regex applied.

    :param content: the content of the requirements file
    :param options: cli options
    """
    numbered_lines = enumerate(content.splitlines(), start=1)
    return skip_regex(ignore_comments(join_lines(numbered_lines)), options)
def process_line(line, filename, line_number, finder=None, comes_from=None,
                 options=None, session=None, wheel_cache=None,
                 constraint=False):
    """Process a single requirements line; This can result in creating/yielding
    requirements, or updating the finder.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
    affect the finder.

    :param constraint: If True, parsing a constraints file.
    :param options: OptionParser options that we may update
    """
    # Parse the option part of the line with a fresh optparse parser so that
    # per-line options never leak between lines.
    parser = build_parser()
    defaults = parser.get_default_values()
    defaults.index_url = None
    if finder:
        # `finder.format_control` will be updated during parsing
        defaults.format_control = finder.format_control
    args_str, options_str = break_args_options(line)
    if sys.version_info < (2, 7, 3):
        # Prior to 2.7.3, shlex cannot deal with unicode entries
        options_str = options_str.encode('utf8')
    opts, _ = parser.parse_args(shlex.split(options_str), defaults)

    # preserve for the nested code path
    line_comes_from = '%s %s (line %s)' % (
        '-c' if constraint else '-r', filename, line_number)

    # yield a line requirement
    if args_str:
        isolated = options.isolated_mode if options else False
        if options:
            cmdoptions.check_install_build_global(options, opts)
        # get the options that apply to requirements
        req_options = {}
        for dest in SUPPORTED_OPTIONS_REQ_DEST:
            if dest in opts.__dict__ and opts.__dict__[dest]:
                req_options[dest] = opts.__dict__[dest]
        yield InstallRequirement.from_line(
            args_str, line_comes_from, constraint=constraint,
            isolated=isolated, options=req_options, wheel_cache=wheel_cache
        )

    # yield an editable requirement
    elif opts.editables:
        isolated = options.isolated_mode if options else False
        default_vcs = options.default_vcs if options else None
        yield InstallRequirement.from_editable(
            opts.editables[0], comes_from=line_comes_from,
            constraint=constraint, default_vcs=default_vcs, isolated=isolated,
            wheel_cache=wheel_cache
        )

    # parse a nested requirements file (recurses via parse_requirements)
    elif opts.requirements or opts.constraints:
        if opts.requirements:
            req_path = opts.requirements[0]
            nested_constraint = False
        else:
            req_path = opts.constraints[0]
            nested_constraint = True
        # original file is over http
        if SCHEME_RE.search(filename):
            # do a url join so relative paths work
            req_path = urllib_parse.urljoin(filename, req_path)
        # original file and nested file are paths
        elif not SCHEME_RE.search(req_path):
            # do a join so relative paths work
            req_path = os.path.join(os.path.dirname(filename), req_path)
        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
        parser = parse_requirements(
            req_path, finder, comes_from, options, session,
            constraint=nested_constraint, wheel_cache=wheel_cache
        )
        for req in parser:
            yield req

    # percolate hash-checking option upward
    elif opts.require_hashes:
        options.require_hashes = opts.require_hashes

    # set finder options (mutates the shared finder; yields nothing)
    elif finder:
        if opts.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )
        if opts.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if opts.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if opts.index_url:
            finder.index_urls = [opts.index_url]
        if opts.use_wheel is False:
            finder.use_wheel = False
            pip.index.fmt_ctl_no_use_wheel(finder.format_control)
        if opts.no_index is True:
            finder.index_urls = []
        if opts.extra_index_urls:
            finder.index_urls.extend(opts.extra_index_urls)
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            finder.find_links.append(value)
        if opts.pre:
            finder.allow_all_prereleases = True
        if opts.process_dependency_links:
            finder.process_dependency_links = True
        if opts.trusted_hosts:
            finder.secure_origins.extend(
                ("*", host, "*") for host in opts.trusted_hosts)
def break_args_options(line):
    """Break up the line into an args and options string. We only want to shlex
    (and then optparse) the options, not the args. args can contain markers
    which are corrupted by shlex.

    Returns a tuple (args, options); the split happens at the first
    whitespace-separated token that begins with a dash.
    """
    tokens = line.split(' ')
    args = []
    options = tokens[:]
    for token in tokens:
        # Fix: the original also tested startswith('--'), which is redundant —
        # every token starting with '--' already starts with '-'.
        if token.startswith('-'):
            break
        args.append(token)
        options.pop(0)
    return ' '.join(args), ' '.join(options)
def build_parser():
    """
    Return a parser for parsing requirement lines
    """
    parser = optparse.OptionParser(add_help_option=False)

    option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
    for option_factory in option_factories:
        option = option_factory()
        parser.add_option(option)

    # By default optparse sys.exits on parsing errors. We want to wrap
    # that in our own exception.
    # NOTE(review): parser.exit is replaced with a plain function, so when
    # optparse calls self.exit(status, msg) internally, `status` lands in the
    # 'self' parameter and the message in 'msg' — it works, but only by
    # accident; confirm before refactoring.
    def parser_exit(self, msg):
        raise RequirementsFileParseError(msg)
    parser.exit = parser_exit

    return parser
def join_lines(lines_enum):
    """Merge backslash-continued lines with the lines that follow them.

    A line ending in a backslash is buffered and joined with subsequent lines
    until a non-continuation line arrives; the joined line is reported under
    the number of its first physical line. Comment lines never continue a
    join, and are prefixed with a space so they still match COMMENT_RE later.
    """
    first_number = None
    buffered = []
    for number, text in lines_enum:
        if text.endswith('\\') and not COMMENT_RE.match(text):
            # Continuation line: remember where the joined line started.
            if not buffered:
                first_number = number
            buffered.append(text.strip('\\'))
        else:
            if COMMENT_RE.match(text):
                # this ensures comments are always matched later
                text = ' ' + text
            if buffered:
                buffered.append(text)
                yield first_number, ''.join(buffered)
                buffered = []
            else:
                yield number, text
    # Flush a dangling continuation at end of input.
    if buffered:
        yield first_number, ''.join(buffered)
# TODO: handle space after '\'.
def ignore_comments(lines_enum):
    """Strip comments and drop lines that end up empty."""
    for number, text in lines_enum:
        stripped = COMMENT_RE.sub('', text).strip()
        if stripped:
            yield number, stripped
def skip_regex(lines_enum, options):
    """Drop lines matching the --skip-requirements-regex pattern.

    The pattern (when configured) is compiled exactly once; without it the
    input iterator is returned untouched.
    """
    pattern_text = options.skip_requirements_regex if options else None
    if not pattern_text:
        return lines_enum
    matcher = re.compile(pattern_text)
    return filterfalse(lambda item: matcher.search(item[1]), lines_enum)
| mit |
GunoH/intellij-community | python/python-features-trainer/resources/learnProjects/python/PyCharmLearningProject/src/warehouse/warehouse.py | 9 | 1037 | from util.util import FRUITS
class Warehouse:
    """Tracks how many pieces of each known fruit are stored.

    The inventory is the instance attribute ``entry`` mapping fruit name
    (from util.FRUITS) to its current count.
    """

    def __init__(self) -> None:
        # Bug fix: `entry` was a class-level dict, so every Warehouse
        # instance shared one inventory (the classic mutable class
        # attribute bug). It is now a per-instance dict.
        self.entry = {fruit: 0 for fruit in FRUITS}

    # fruit name from util.FRUITS (mango, apple...)
    def add_fruits(self, fruit_name, quantity) -> None:
        """Add *quantity* pieces of *fruit_name*; KeyError for unknown fruit."""
        if fruit_name not in self.entry:
            raise KeyError(f"Not found fruit with name: {fruit_name}")
        self.entry[fruit_name] += quantity

    def take_fruit(self, fruit_name) -> bool:
        """Remove one piece of *fruit_name* if available.

        Returns True on success, False when the stock is empty; raises
        KeyError for an unknown fruit.
        """
        cur_quantity = self.entry.get(fruit_name)
        if cur_quantity is None:
            raise KeyError(f"Not found fruit with name: {fruit_name}")
        elif cur_quantity > 0:
            self.entry[fruit_name] = cur_quantity - 1
            return True
        return False

    def print_all_fruits(self) -> None:
        """Print every fruit with its current count, one per line."""
        for fruit, quantity in self.entry.items():
            print(f"{fruit}: {quantity}")
| apache-2.0 |
v1ron/mk802iv-linux | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events with no dedicated handler;
# dumped by print_unhandled() at the end of the trace.
unhandled = autodict()
def trace_begin():
    # Called by perf once before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called by perf after the last event: dump the unhandled-event counters.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    vec):
    # Handler for irq:softirq_entry; prints the softirq vector as a symbol
    # string to exercise symbol_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
    (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    call_site, ptr, bytes_req, bytes_alloc,
    gfp_flags):
    # Handler for kmem:kmalloc; prints allocation details and exercises
    # flag_str() on the gfp_flags field.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
    "bytes_alloc=%u, gfp_flags=%s\n" % \
    (call_site, ptr, bytes_req, bytes_alloc,
    flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events with no dedicated handler, keyed by event name.
    # First occurrence raises TypeError from the autodict leaf, hence
    # the try/except initialisation.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Print the fixed columns every handler shares (no trailing newline).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Print trace fields not passed as handler args, fetched back from perf
    # via the context helpers.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
    % (common_pc(context), trace_flag_str(common_flags(context)), \
    common_lock_depth(context))
def print_unhandled():
    # Dump the table of events that had no dedicated handler; silent when
    # every event was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
    "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
vickenty/ookoobah | pyglet-c9188efc2e30/pyglet/image/codecs/gif.py | 22 | 5390 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Read GIF control data.
http://www.w3.org/Graphics/GIF/spec-gif89a.txt
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import struct
from pyglet.image.codecs import ImageDecodeException
class GIFStream(object):
    """Top-level result of parsing a GIF: the list of frames encountered."""

    def __init__(self):
        # Filled by read_table_based_image(), one GIFImage per image
        # descriptor in the stream.
        self.images = []
class GIFImage(object):
    # Frame delay in seconds; stays None unless a graphic control extension
    # preceded this frame's image descriptor.
    delay = None
class GraphicsScope(object):
    # Pending delay (seconds) from the most recent graphic control
    # extension; consumed by the next image descriptor only.
    delay = None
# Appendix A.
LABEL_EXTENSION_INTRODUCER = 0x21        # '!' introduces an extension block
LABEL_GRAPHIC_CONTROL_EXTENSION = 0xf9   # extension subtype carrying delays
LABEL_IMAGE_DESCRIPTOR = 0x2c            # ',' starts an image descriptor
LABEL_TRAILER = 0x3b                     # ';' terminates the GIF stream
def unpack(format, file):
    """Read struct.calcsize(format) bytes from *file* and unpack them.

    Raises ImageDecodeException when the stream is truncated.
    """
    needed = struct.calcsize(format)
    data = file.read(needed)
    if len(data) == needed:
        return struct.unpack(format, data)
    raise ImageDecodeException('Unexpected EOF')
def read_byte(file):
    """Return the next byte of *file* as an int; raise on end of stream."""
    data = file.read(1)
    if data:
        return ord(data)
    raise ImageDecodeException('Unexpected EOF')
def read(file):
    '''Read a GIF file stream.

    :rtype: GIFStream
    '''
    # 17. Header
    signature = file.read(3)
    version = file.read(3)
    if signature != b'GIF':
        raise ImageDecodeException('Not a GIF stream')

    stream = GIFStream()

    # 18. Logical screen descriptor
    (logical_screen_width,
     logical_screen_height,
     fields,
     background_color_index,
     pixel_aspect_ratio) = unpack('HHBBB', file)
    global_color_table_flag = fields & 0x80
    global_color_table_size = fields & 0x7

    # 19. Global color table (3 * 2**(size+1) bytes == 6 << size); read and
    # discarded — only timing metadata is collected by this module.
    if global_color_table_flag:
        global_color_table = file.read(6 << global_color_table_size)

    # <Data>* : image descriptors and extensions until the trailer byte.
    graphics_scope = GraphicsScope()
    block_type = read_byte(file)

    while block_type != LABEL_TRAILER:
        if block_type == LABEL_IMAGE_DESCRIPTOR:
            read_table_based_image(file, stream, graphics_scope)
            # A graphic control extension only applies to one image.
            graphics_scope = GraphicsScope()
        elif block_type == LABEL_EXTENSION_INTRODUCER:
            extension_block_type = read_byte(file)
            if extension_block_type == LABEL_GRAPHIC_CONTROL_EXTENSION:
                read_graphic_control_extension(file, stream, graphics_scope)
            else:
                skip_data_sub_blocks(file)
        else:
            # Skip bytes until a valid start character is found
            # NOTE(review): Python 2 print statement — looks like a debug
            # leftover; confirm before porting to Python 3.
            print block_type
            pass

        block_type = read_byte(file)

    return stream
def skip_data_sub_blocks(file):
    """Consume GIF data sub-blocks (section 15): length-prefixed chunks
    terminated by a zero-length block."""
    while True:
        block_size = read_byte(file)
        if block_size == 0:
            return
        file.read(block_size)
def read_table_based_image(file, stream, graphics_scope):
    """Parse one image descriptor (sections 20-22) and record a GIFImage.

    Only the frame delay from *graphics_scope* is kept; pixel data and any
    local color table are read and discarded.
    """
    gif_image = GIFImage()
    gif_image.delay = graphics_scope.delay
    stream.images.append(gif_image)

    # 20. Image descriptor
    (image_left_position,
     image_top_position,
     image_width,
     image_height,
     fields) = unpack('HHHHB', file)

    local_color_table_flag = fields & 0x80
    local_color_table_size = fields & 0x7
    if local_color_table_flag:
        # 21. Local color table
        local_color_table = file.read(6 << local_color_table_size)

    # 22. Table based image data
    lzw_code_size = file.read(1)
    skip_data_sub_blocks(file)
def read_graphic_control_extension(file, stream, graphics_scope):
    """Parse section 23 (graphic control extension) and stash the frame
    delay, converted from 1/100ths of a second to seconds."""
    (block_size,
     fields,
     delay_time,
     transparent_color_index,
     terminator) = unpack('BBHBB', file)

    if block_size != 4:
        raise ImageDecodeException('Incorrect block size')

    if delay_time:
        # Follow Firefox/Mac behaviour: use 100ms delay for any delay
        # less than 10ms.
        delay_time = 10 if delay_time <= 1 else delay_time
        graphics_scope.delay = float(delay_time) / 100
| mit |
DarkRebel/myrobotlab | src/resource/Python/examples/Joystick.Servo.Mech-Dickel.py | 5 | 4983 | exec arduino = Runtime.createAndStart("arduino","Arduino")
# Create the MyRobotLab services: joystick input and one Servo per actuator.
joystick = Runtime.createAndStart("joystick","Joystick")
dcMotorLeft = Runtime.createAndStart("dcMotorLeft","Servo")
dcMotorRight = Runtime.createAndStart("dcMotorRight","Servo")
shoulderPitchRight = Runtime.createAndStart("shoulderPitchRight","Servo")
shoulderYawRight = Runtime.createAndStart("shoulderYawRight","Servo")
elbowRight = Runtime.createAndStart("elbowRight","Servo")
handRight = Runtime.createAndStart("handRight","Servo")
headPan = Runtime.createAndStart("headPan","Servo")
headTilt = Runtime.createAndStart("headTilt","Servo")
arduino.connect("COM3")
# Give the Arduino time to reset after the serial connection opens.
sleep(4)
joystick.setController(2)
joystick.startPolling()
# Wire each servo to its Arduino pin.
arduino.attach(dcMotorLeft.getName(),2)
arduino.attach(dcMotorRight.getName(),3)
arduino.attach(shoulderPitchRight.getName(),6)
arduino.attach(shoulderYawRight.getName(),7)
arduino.attach(elbowRight.getName(),8)
arduino.attach(handRight.getName(),9)
arduino.attach(headPan.getName(),10)
arduino.attach(headTilt.getName(),11)
# Last-seen joystick state, updated by the poll handlers below and read
# by check().
a = 0
b = 0
dp = 0
rb = 0
sx = 0
sy = 0
x = 0
y = 0
z = 0
# Slow the servos down for smoother motion.
shoulderPitchRight.setSpeed(0.85)
shoulderYawRight.setSpeed(0.85)
elbowRight.setSpeed(0.85)
handRight.setSpeed(0.85)
headPan.setSpeed(0.85)
headTilt.setSpeed(0.85)
# Move everything to the initial pose.
shoulderPitchRight.moveTo(145)
shoulderYawRight.moveTo(70)
elbowRight.moveTo(80)
handRight.moveTo(40)
headPan.moveTo(95)
headTilt.moveTo(90)
# define buttons' variables
def buttonA():
    # Poll handler: store button A state, then re-evaluate all moves.
    global a
    a = msg_joystick_button1.data[0]
    print a
    check()
def buttonB():
    # Poll handler: store button B state, then re-evaluate all moves.
    global b
    b = msg_joystick_button2.data[0]
    print b
    check()
def buttonLTandRT():
    # Poll handler: store the raw Z axis value (LT/RT triggers appear to
    # share the Z axis — confirm for this controller), then re-evaluate.
    global z
    z = msg_joystick_ZAxisRaw.data[0]
    print z
    check()
def buttonRB():
    # Poll handler: store the RB shoulder button state, then re-evaluate.
    global rb
    rb = msg_joystick_button6.data[0]
    print rb
    check()
def buttonX():
    # Poll handler: store button X state, then re-evaluate all moves.
    global x
    x = msg_joystick_button3.data[0]
    print x
    check()
def buttonY():
    # Poll handler: store button Y state, then re-evaluate all moves.
    global y
    y = msg_joystick_button4.data[0]
    print y
    check()
def directional():
global dp
dp = msg_joystick_hatSwitchRaw.data[0]
print dp
check()
def leftStickX():
global sx
sx = msg_joystick_XAxisRaw.data[0]
print sx
check()
def leftStickY():
global sy
sy = msg_joystick_YAxisRaw.data[0]
print sy
check()
# define moves
# Drive servos: 180/0 are the two full-speed directions, 90 is neutral
# (see the "Just waiting" branch of check()).
def forward():
    print "Forward"
    dcMotorLeft.moveTo(180)
    dcMotorRight.moveTo(180)

def backward():
    print "Backward"
    dcMotorLeft.moveTo(0)
    dcMotorRight.moveTo(0)

def turnLeft():
    # Spin in place: wheels run in opposite directions.
    print "Turn left"
    dcMotorLeft.moveTo(0)
    dcMotorRight.moveTo(180)

def turnRight():
    print "Turn right"
    dcMotorLeft.moveTo(180)
    dcMotorRight.moveTo(0)
def check():
    """Map the current joystick state (globals set by the callbacks above)
    onto robot actions.  At most one branch fires per call; when nothing
    matches, the drive motors are set to neutral and all servos halted."""
    # dc motors -- left stick, only reacting beyond +/-0.8 deflection
    if ((sx >= -1) and (sx <= -0.8)):
        turnLeft()
    elif ((sx <= 1) and (sx >= 0.8)):
        turnRight()
    elif ((sy >= -1) and (sy <= -0.8)):
        forward()
    elif ((sy <= 1) and (sy >= 0.8)):
        backward()
    # right pitch shoulder -- d-pad plus right bumper
    elif ((dp == 0.25) and (rb == 1)):
        print "Right pitch shoulder decreasing"
        shoulderPitchRight.moveTo(20)
    elif ((dp == 0.75) and (rb == 1)):
        print "Right pitch shoulder increasing"
        shoulderPitchRight.moveTo(160)
    # right yaw shoulder
    elif ((dp == 0.5) and (rb == 1)):
        print "Right yaw shoulder decreasing"
        shoulderYawRight.moveTo(0)
    elif ((dp == 1) and (rb == 1)):
        print "Right yaw shoulder increasing"
        shoulderYawRight.moveTo(110)
    # right elbow -- d-pad plus trigger fully pressed (z <= -0.996)
    elif ((dp == 0.25) and (z <= -0.996)):
        print "Right elbow decreasing"
        elbowRight.moveTo(20)
    elif ((dp == 0.75) and (z <= -0.996)):
        print "Right elbow increasing"
        elbowRight.moveTo(130)
    # hand -- trigger plus A (close) / B (open)
    elif ((z <= -0.996) and (a == 1)):
        print "Hand closing"
        handRight.moveTo(25)
    elif ((z <= -0.996) and (b == 1)):
        print "Hand opening"
        handRight.moveTo(125)
    # head -- d-pad plus Y button
    elif ((dp == 0.5) and (y == 1)):
        print "Look right"
        headPan.moveTo(150)
    elif ((dp == 1) and (y == 1)):
        print "Look left"
        headPan.moveTo(40)
    elif ((dp == 0.25) and (y == 1)):
        print "Look down"
        headTilt.moveTo(20)
    elif ((dp == 0.75) and (y == 1)):
        print "Look up"
        headTilt.moveTo(125)
    # while nothing pre-defined
    else:
        print "Just waiting..."
        # 90 = neutral for the drive servos; everything else is halted
        dcMotorLeft.moveTo(90)
        dcMotorRight.moveTo(90)
        shoulderPitchRight.stopServo()
        shoulderYawRight.stopServo()
        elbowRight.stopServo()
        handRight.stopServo()
        headPan.stopServo()
        headTilt.stopServo()
# Wire the joystick events to the callbacks defined above.
# python.name is the name of this Python service inside MRL.
joystick.addListener("button1", python.name, "buttonA")
joystick.addListener("button2", python.name, "buttonB")
joystick.addListener("button3", python.name, "buttonX")
joystick.addListener("button4", python.name, "buttonY")
joystick.addListener("button6", python.name, "buttonRB")
joystick.addListener("hatSwitchRaw", python.name, "directional")
joystick.addListener("XAxisRaw", python.name, "leftStickX")
joystick.addListener("YAxisRaw", python.name, "leftStickY")
joystick.addListener("ZAxisRaw", python.name, "buttonLTandRT") | apache-2.0 |
cpausmit/IntelROCCS | Detox/deleteDeprecated.py | 3 | 4521 | #!/usr/bin/python
#---------------------------------------------------------------------------------------------------
#
# This is the master script that the user should run. It runs all other auxiliary scripts. At the
# end you will have a full set of deletion suggestions.
#
# For now we get the list of all sites from the file with quotas because we need to know the
# capacity of each site (and right now we use only one group as proxy).
#
#---------------------------------------------------------------------------------------------------
import sys, os, re
import phedexApi
# setup definitions
if not os.environ.get('DETOX_DB'):
print '\n ERROR - DETOX environment not defined: source setup.sh\n'
sys.exit(0)
# make sure we start in the right directory
os.chdir(os.environ.get('DETOX_BASE'))
deprecated = {}
siteDsets = {}
siteSize2Del = {}
siteLCPerc = {}
#===================================================================================================
# H E L P E R S
#===================================================================================================
def readDeprecated():
    """Fill the global ``deprecated`` dict with the dataset names listed in
    DeprecatedSets.txt (one "<name> <status>" pair per line).

    Uses ``with`` so the file handle is closed even if a line fails to
    parse (the old version leaked the handle on error), and plain file
    iteration instead of the deprecated ``xreadlines()``.
    """
    filename = 'DeprecatedSets.txt'
    path = (os.environ['DETOX_DB'] + '/' + os.environ['DETOX_STATUS'] + '/'
            + filename)
    with open(path, 'r') as inputFile:
        for line in inputFile:
            name, status = line.split()  # status is read but not used
            deprecated[name] = 1
def readMatchPhedex():
    """Scan the cached PhEDEx dump and, for every replica of a deprecated
    AnalysisOps dataset, accumulate its size and name per site into the
    global siteSize2Del / siteDsets dicts."""
    filename = os.environ['DETOX_PHEDEX_CACHE']
    inputFile = open(os.environ['DETOX_DB'] + '/' + os.environ['DETOX_STATUS'] + '/'
                     + filename, "r")
    for line in inputFile.xreadlines():
        line = line.rstrip()
        items = line.split()
        # print items
        datasetName = items[0]
        group = items[1]
        # only deprecated datasets owned by AnalysisOps are considered
        if datasetName not in deprecated:
            continue
        if group != 'AnalysisOps':
            continue
        #if group != 'caf-comm':
        #    continue
        size = float(items[3])  # size in GB (see the summary print in MAIN)
        site = items[5]
        print site  # NOTE(review): looks like leftover debug output -- printed per match
        if site not in siteSize2Del:
            siteSize2Del[site] = 0
        siteSize2Del[site] = siteSize2Del[site] + size
        if site not in siteDsets:
            siteDsets[site] = [datasetName]
        else:
            siteDsets[site].append(datasetName)
    inputFile.close()
def requestDeletions(site,dsetNames):
phedex = phedexApi.phedexApi(logPath='./')
# compose data for deletion request
check,data = phedex.xmlData(datasets=dsetNames,instance='prod',level='block')
if check:
print " ERROR - phedexApi.xmlData failed"
sys.exit(1)
# here the request is really sent
message = 'IntelROCCS -- Cache Release Request -- Deprecated Datasets'
check,response = phedex.delete(node=site,data=data,comments=message,instance='prod')
if check:
print " ERROR - phedexApi.delete failed"
print site
print response
sys.exit(1)
respo = response.read()
matchObj = re.search(r'"id":"(\d+)"',respo)
reqid = int(matchObj.group(1))
del phedex
# here we brute force deletion to be approved
phedex = phedexApi.phedexApi(logPath='./')
check,response = phedex.updateRequest(decision='approve',request=reqid,node=site,instance='prod')
if check:
print " ERROR - phedexApi.updateRequest failed - reqid="+ str(reqid)
print response
del phedex
#====================================================================================================
# M A I N
#====================================================================================================
# find deprecated datasets
readDeprecated()
# match deprecated datasets against phedex data
readMatchPhedex()
total = 0
for site in sorted(siteSize2Del):
print ("%-7.1f %-18s" %(siteSize2Del[site],site))
total = total + siteSize2Del[site]
print ("Total size to be deleted = %8.1f GB" %total)
#submit deletion requests for deprecated datasets
print "\n You want do continue with those deleteions? [Y/N]"
line = sys.stdin.readline()
line = line.rstrip()
if line == 'Y':
for site in sorted(siteDsets):
#if 'T2_' not in site: continue
#if site.startswith('T2_DE'):
# continue
if site.startswith('T2_CH'):
continue
setsToDelete = siteDsets[site]
print site
print siteSize2Del[site]
for dset in setsToDelete:
print dset
#print setsToDelete
requestDeletions(site,setsToDelete)
| mit |
dorianpula/paramiko | tests/loop.py | 4 | 2905 | # Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
...
"""
import threading, socket
from paramiko.common import asbytes
class LoopSocket (object):
    """
    A LoopSocket looks like a normal socket, but all data written to it is
    delivered on the read-end of another LoopSocket, and vice versa.  It's
    like a software "socketpair".
    """

    def __init__(self):
        self.__in_buffer = bytes()      # bytes received but not yet read
        self.__lock = threading.Lock()  # guards __in_buffer
        self.__cv = threading.Condition(self.__lock)  # signalled on new data
        self.__timeout = None           # recv timeout in seconds (None = block)
        self.__mate = None              # the peer LoopSocket, set by link()
        self._closed = False

    def close(self):
        """Detach from the peer and drop any buffered data.
        NOTE(review): a reader already blocked in recv() is not woken here;
        it only notices the close after its timeout expires."""
        self.__unlink()
        self._closed = True
        try:
            self.__lock.acquire()
            self.__in_buffer = bytes()
        finally:
            self.__lock.release()

    def send(self, data):
        """Deliver data to the peer's read buffer; returns len(data).
        Raises EOFError if this end has been unlinked/closed."""
        data = asbytes(data)
        if self.__mate is None:
            # EOF
            raise EOFError()
        self.__mate.__feed(data)
        return len(data)

    def recv(self, n):
        """Return up to n buffered bytes; an empty result means EOF.
        Blocks (up to the configured timeout) while the buffer is empty and
        raises socket.timeout if nothing arrives in time."""
        self.__lock.acquire()
        try:
            if self.__mate is None:
                # EOF
                return bytes()
            if len(self.__in_buffer) == 0:
                self.__cv.wait(self.__timeout)
                if len(self.__in_buffer) == 0:
                    raise socket.timeout
            out = self.__in_buffer[:n]
            self.__in_buffer = self.__in_buffer[n:]
            return out
        finally:
            self.__lock.release()

    def settimeout(self, n):
        self.__timeout = n

    def link(self, other):
        """Pair this socket with other so their send/recv are cross-wired."""
        self.__mate = other
        self.__mate.__mate = self

    def __feed(self, data):
        # Called by the peer's send(): append to our buffer and wake readers.
        self.__lock.acquire()
        try:
            self.__in_buffer += data
            # notify_all() replaces the deprecated notifyAll() alias
            # (deprecated since Python 3.10).
            self.__cv.notify_all()
        finally:
            self.__lock.release()

    def __unlink(self):
        # Break the pairing on both ends (idempotent).
        m = None
        self.__lock.acquire()
        try:
            if self.__mate is not None:
                m = self.__mate
                self.__mate = None
        finally:
            self.__lock.release()
        if m is not None:
            m.__unlink()
| lgpl-2.1 |
Ozmodian/Wordpress_local | eb/macosx/python2.7/lib/aws/requests/packages/charade/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte charset probers.

    Combines a coding state machine (is the byte sequence legal for the
    encoding?) with a character distribution analyzer (are the decoded
    characters statistically plausible?).  Concrete subclasses install
    both engines in their own __init__.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        # Both engines are supplied by the concrete subclass.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        self._mLastChar = [0, 0]  # carry-over of the last two bytes between feed() calls

    def reset(self):
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract placeholder: subclasses return their charset name.
        pass

    def feed(self, aBuf):
        """Feed a chunk of bytes and return the resulting prober state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Run every byte through the coding state machine.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognised; feed it to the
                # distribution analyzer.  The first character of this chunk
                # may straddle the previous chunk, hence the _mLastChar
                # carry-over for i == 0.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte for the next chunk's carry-over.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare victory once enough data is confidently seen.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| gpl-2.0 |
sopier/django | tests/gis_tests/gdal_tests/test_raster.py | 238 | 13865 | """
gdalinfo tests/gis_tests/data/rasters/raster.tif:
Driver: GTiff/GeoTIFF
Files: tests/gis_tests/data/rasters/raster.tif
Size is 163, 174
Coordinate System is:
PROJCS["NAD83 / Florida GDL Albers",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.2572221010002,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",24],
PARAMETER["standard_parallel_2",31.5],
PARAMETER["latitude_of_center",24],
PARAMETER["longitude_of_center",-84],
PARAMETER["false_easting",400000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","3086"]]
Origin = (511700.468070655711927,435103.377123198588379)
Pixel Size = (100.000000000000000,-100.000000000000000)
Metadata:
AREA_OR_POINT=Area
Image Structure Metadata:
INTERLEAVE=BAND
Corner Coordinates:
Upper Left ( 511700.468, 435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
Lower Left ( 511700.468, 417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
Upper Right ( 528000.468, 435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
Lower Right ( 528000.468, 417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
Center ( 519850.468, 426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
NoData Value=15
"""
import os
import struct
import tempfile
import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils._os import upath
from ..data.rasters.textrasters import JSON_RASTER
if HAS_GDAL:
from django.contrib.gis.gdal import GDALRaster, GDAL_VERSION
from django.contrib.gis.gdal.raster.band import GDALBand
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALRasterTests(unittest.TestCase):
    """
    Test a GDALRaster instance created from a file (GeoTiff).

    The expected values asserted below come from the gdalinfo dump of the
    reference raster, reproduced in the module docstring.
    """
    def setUp(self):
        self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
                                    '../data/rasters/raster.tif')
        self.rs = GDALRaster(self.rs_path)

    def test_rs_name_repr(self):
        self.assertEqual(self.rs_path, self.rs.name)
        six.assertRegex(self, repr(self.rs), "<Raster object at 0x\w+>")

    def test_rs_driver(self):
        self.assertEqual(self.rs.driver.name, 'GTiff')

    def test_rs_size(self):
        self.assertEqual(self.rs.width, 163)
        self.assertEqual(self.rs.height, 174)

    def test_rs_srs(self):
        self.assertEqual(self.rs.srs.srid, 3086)
        self.assertEqual(self.rs.srs.units, (1.0, 'metre'))

    def test_geotransform_and_friends(self):
        # Assert correct values for file based raster
        self.assertEqual(self.rs.geotransform,
                         [511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0])
        self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
        self.assertEqual(self.rs.origin.x, 511700.4680706557)
        self.assertEqual(self.rs.origin.y, 435103.3771231986)
        self.assertEqual(self.rs.scale, [100.0, -100.0])
        self.assertEqual(self.rs.scale.x, 100.0)
        self.assertEqual(self.rs.scale.y, -100.0)
        self.assertEqual(self.rs.skew, [0, 0])
        self.assertEqual(self.rs.skew.x, 0)
        self.assertEqual(self.rs.skew.y, 0)
        # Create in-memory rasters and change gtvalues
        rsmem = GDALRaster(JSON_RASTER)
        rsmem.geotransform = range(6)
        self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
        # geotransform is [origin.x, scale.x, skew.x, origin.y, skew.y, scale.y]
        self.assertEqual(rsmem.origin, [0, 3])
        self.assertEqual(rsmem.origin.x, 0)
        self.assertEqual(rsmem.origin.y, 3)
        self.assertEqual(rsmem.scale, [1, 5])
        self.assertEqual(rsmem.scale.x, 1)
        self.assertEqual(rsmem.scale.y, 5)
        self.assertEqual(rsmem.skew, [2, 4])
        self.assertEqual(rsmem.skew.x, 2)
        self.assertEqual(rsmem.skew.y, 4)
        self.assertEqual(rsmem.width, 5)
        self.assertEqual(rsmem.height, 5)

    def test_rs_extent(self):
        self.assertEqual(self.rs.extent,
                         (511700.4680706557, 417703.3771231986,
                          528000.4680706557, 435103.3771231986))

    def test_rs_bands(self):
        self.assertEqual(len(self.rs.bands), 1)
        self.assertIsInstance(self.rs.bands[0], GDALBand)

    def test_file_based_raster_creation(self):
        # Prepare tempfile
        rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
        # Create file-based raster from scratch, cloning the reference raster
        GDALRaster({
            'datatype': self.rs.bands[0].datatype(),
            'driver': 'tif',
            'name': rstfile.name,
            'width': 163,
            'height': 174,
            'nr_of_bands': 1,
            'srid': self.rs.srs.wkt,
            'origin': (self.rs.origin.x, self.rs.origin.y),
            'scale': (self.rs.scale.x, self.rs.scale.y),
            'skew': (self.rs.skew.x, self.rs.skew.y),
            'bands': [{
                'data': self.rs.bands[0].data(),
                'nodata_value': self.rs.bands[0].nodata_value,
            }],
        })
        # Reload newly created raster from file
        restored_raster = GDALRaster(rstfile.name)
        self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
        self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
        if numpy:
            numpy.testing.assert_equal(
                restored_raster.bands[0].data(),
                self.rs.bands[0].data()
            )
        else:
            self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())

    def test_raster_warp(self):
        # Create in memory raster
        source = GDALRaster({
            'datatype': 1,
            'driver': 'MEM',
            'name': 'sourceraster',
            'width': 4,
            'height': 4,
            'nr_of_bands': 1,
            'srid': 3086,
            'origin': (500000, 400000),
            'scale': (100, -100),
            'skew': (0, 0),
            'bands': [{
                'data': range(16),
                'nodata_value': 255,
            }],
        })
        # Test altering the scale, width, and height of a raster
        data = {
            'scale': [200, -200],
            'width': 2,
            'height': 2,
        }
        target = source.warp(data)
        self.assertEqual(target.width, data['width'])
        self.assertEqual(target.height, data['height'])
        self.assertEqual(target.scale, data['scale'])
        self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
        self.assertEqual(target.name, 'sourceraster_copy.MEM')
        result = target.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        self.assertEqual(result, [5, 7, 13, 15])
        # Test altering the name and datatype (to float)
        data = {
            'name': '/path/to/targetraster.tif',
            'datatype': 6,
        }
        target = source.warp(data)
        self.assertEqual(target.bands[0].datatype(), 6)
        self.assertEqual(target.name, '/path/to/targetraster.tif')
        self.assertEqual(target.driver.name, 'MEM')
        result = target.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        self.assertEqual(
            result,
            [0.0, 1.0, 2.0, 3.0,
             4.0, 5.0, 6.0, 7.0,
             8.0, 9.0, 10.0, 11.0,
             12.0, 13.0, 14.0, 15.0]
        )

    def test_raster_transform(self):
        if GDAL_VERSION < (1, 8, 1):
            self.skipTest("GDAL >= 1.8.1 is required for this test")
        # Prepare tempfile and nodata value
        rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
        ndv = 99
        # Create in file based raster
        source = GDALRaster({
            'datatype': 1,
            'driver': 'tif',
            'name': rstfile.name,
            'width': 5,
            'height': 5,
            'nr_of_bands': 1,
            'srid': 4326,
            'origin': (-5, 5),
            'scale': (2, -2),
            'skew': (0, 0),
            'bands': [{
                'data': range(25),
                'nodata_value': ndv,
            }],
        })
        # Transform raster into srid 3086 (the original comment said 4326,
        # which was the *source* srid).
        target = source.transform(3086)
        # Reload data from disk
        target = GDALRaster(target.name)
        self.assertEqual(target.srs.srid, 3086)
        self.assertEqual(target.width, 7)
        self.assertEqual(target.height, 7)
        self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
        self.assertEqual(target.origin, [9124842.791079799, 1589911.6476407414])
        self.assertEqual(target.scale, [223824.82664250192, -223824.82664250192])
        self.assertEqual(target.skew, [0, 0])
        result = target.bands[0].data()
        if numpy:
            result = result.flatten().tolist()
        # The reprojection of a raster that spans over a large area
        # skews the data matrix and might introduce nodata values.
        self.assertEqual(
            result,
            [
                ndv, ndv, ndv, ndv, 4, ndv, ndv,
                ndv, ndv, 2, 3, 9, ndv, ndv,
                ndv, 1, 2, 8, 13, 19, ndv,
                0, 6, 6, 12, 18, 18, 24,
                ndv, 10, 11, 16, 22, 23, ndv,
                ndv, ndv, 15, 21, 22, ndv, ndv,
                ndv, ndv, 20, ndv, ndv, ndv, ndv,
            ]
        )
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALBandTests(unittest.TestCase):
    """Tests for GDALBand, using band 1 of the reference GeoTiff
    (properties listed in the module docstring)."""
    def setUp(self):
        self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
                                    '../data/rasters/raster.tif')
        rs = GDALRaster(self.rs_path)
        self.band = rs.bands[0]

    def test_band_data(self):
        self.assertEqual(self.band.width, 163)
        self.assertEqual(self.band.height, 174)
        self.assertEqual(self.band.description, '')
        self.assertEqual(self.band.datatype(), 1)
        self.assertEqual(self.band.datatype(as_string=True), 'GDT_Byte')
        self.assertEqual(self.band.min, 0)
        self.assertEqual(self.band.max, 255)
        self.assertEqual(self.band.nodata_value, 15)

    def test_read_mode_error(self):
        # Open raster in read mode
        rs = GDALRaster(self.rs_path, write=False)
        band = rs.bands[0]
        # Setting attributes on a read-mode raster raises an exception in the
        # _flush method (the original comment said "write mode", which was
        # backwards).
        self.assertRaises(GDALException, setattr, band, 'nodata_value', 10)

    def test_band_data_setters(self):
        # Create in-memory raster and get band
        rsmem = GDALRaster({
            'datatype': 1,
            'driver': 'MEM',
            'name': 'mem_rst',
            'width': 10,
            'height': 10,
            'nr_of_bands': 1,
            'srid': 4326,
        })
        bandmem = rsmem.bands[0]
        # Set nodata value
        bandmem.nodata_value = 99
        self.assertEqual(bandmem.nodata_value, 99)
        # Set data for entire dataset
        bandmem.data(range(100))
        if numpy:
            numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10))
        else:
            self.assertEqual(bandmem.data(), list(range(100)))
        # Prepare data for setting values in subsequent tests: a 2x2 patch
        # written at offset (1, 1) in several different input formats.
        block = list(range(100, 104))
        packed_block = struct.pack('<' + 'B B B B', *block)
        # Set data from list
        bandmem.data(block, (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from packed block
        bandmem.data(packed_block, (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from bytes
        bandmem.data(bytes(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from bytearray
        bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from memoryview
        bandmem.data(six.memoryview(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from numpy array
        if numpy:
            bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2))
            numpy.testing.assert_equal(
                bandmem.data(offset=(1, 1), size=(2, 2)),
                numpy.array(block).reshape(2, 2)
            )
        # Test json input data
        rsmemjson = GDALRaster(JSON_RASTER)
        bandmemjson = rsmemjson.bands[0]
        if numpy:
            numpy.testing.assert_equal(
                bandmemjson.data(),
                numpy.array(range(25)).reshape(5, 5)
            )
        else:
            self.assertEqual(bandmemjson.data(), list(range(25)))
| bsd-3-clause |
semprebon/mapnik | scons/scons-local-1.2.0/SCons/Tool/aixf77.py | 12 | 2611 | """engine.SCons.Tool.aixf77
Tool-specific initialization for IBM Visual Age f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixf77.py 3842 2008/12/20 22:59:52 scons"
import os.path
#import SCons.Platform.aix
import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
# the correct package names in the following list and uncommenting the
# SCons.Platform.aix_get_xlc() call the in the function below.
packages = []


def get_xlf77(env):
    """Return a (path, f77, shf77, version) tuple for the xlf77 compiler.

    The compiler names default to 'xlf77' / 'xlf77_r' unless overridden via
    the F77 / SHF77 construction variables; path and version are currently
    always None (package detection is disabled, see the commented call).
    """
    static_name = env.get('F77', 'xlf77')
    shared_name = env.get('SHF77', 'xlf77_r')
    #return SCons.Platform.aix.get_xlc(env, static_name, shared_name, packages)
    return (None, static_name, shared_name, None)


def generate(env):
    """
    Add Builders and construction variables for the Visual Age FORTRAN
    compiler to an Environment.
    """
    path, static_name, shared_name, version = get_xlf77(env)
    if path:
        static_name = os.path.join(path, static_name)
        shared_name = os.path.join(path, shared_name)
    f77.generate(env)
    env['F77'] = static_name
    env['SHF77'] = shared_name


def exists(env):
    """Return the compiler's full path when it is installed, else None."""
    path, static_name, shared_name, version = get_xlf77(env)
    if not (path and static_name):
        return None
    candidate = os.path.join(path, static_name)
    return candidate if os.path.exists(candidate) else None
| lgpl-2.1 |
axinging/chromium-crosswalk | third_party/logilab/logilab/common/vcgutils.py | 117 | 7673 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Functions to generate files readable with Georg Sander's vcg
(Visualization of Compiler Graphs).
You can download vcg at http://rw4.cs.uni-sb.de/~sander/html/gshome.html
Note that vcg exists as a debian package.
See vcg's documentation for explanation about the different values that
maybe used for the functions parameters.
"""
__docformat__ = "restructuredtext en"
import string
# Legal values for the enumerated vcg attributes, keyed by category; the
# *_ATTRS tables below reference these tuples.
ATTRS_VAL = {
    'algos': ('dfs', 'tree', 'minbackward',
              'left_to_right', 'right_to_left',
              'top_to_bottom', 'bottom_to_top',
              'maxdepth', 'maxdepthslow', 'mindepth', 'mindepthslow',
              'mindegree', 'minindegree', 'minoutdegree',
              'maxdegree', 'maxindegree', 'maxoutdegree'),
    'booleans': ('yes', 'no'),
    'colors': ('black', 'white', 'blue', 'red', 'green', 'yellow',
               'magenta', 'lightgrey',
               'cyan', 'darkgrey', 'darkblue', 'darkred', 'darkgreen',
               'darkyellow', 'darkmagenta', 'darkcyan', 'gold',
               'lightblue', 'lightred', 'lightgreen', 'lightyellow',
               'lightmagenta', 'lightcyan', 'lilac', 'turquoise',
               'aquamarine', 'khaki', 'purple', 'yellowgreen', 'pink',
               'orange', 'orchid'),
    'shapes': ('box', 'ellipse', 'rhomb', 'triangle'),
    'textmodes': ('center', 'left_justify', 'right_justify'),
    'arrowstyles': ('solid', 'line', 'none'),
    'linestyles': ('continuous', 'dashed', 'dotted', 'invisible'),
    }

# meaning of possible values:
#   O -> string
#   1 -> int
#   list -> value in list
# (These tables drive VCGPrinter._write_attributes validation/rendering.)
GRAPH_ATTRS = {
    'title': 0,
    'label': 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'bordercolor': ATTRS_VAL['colors'],
    'width': 1,
    'height': 1,
    'borderwidth': 1,
    'textmode': ATTRS_VAL['textmodes'],
    'shape': ATTRS_VAL['shapes'],
    'shrink': 1,
    'stretch': 1,
    'orientation': ATTRS_VAL['algos'],
    'vertical_order': 1,
    'horizontal_order': 1,
    'xspace': 1,
    'yspace': 1,
    'layoutalgorithm': ATTRS_VAL['algos'],
    'late_edge_labels': ATTRS_VAL['booleans'],
    'display_edge_labels': ATTRS_VAL['booleans'],
    'dirty_edge_labels': ATTRS_VAL['booleans'],
    'finetuning': ATTRS_VAL['booleans'],
    'manhattan_edges': ATTRS_VAL['booleans'],
    'smanhattan_edges': ATTRS_VAL['booleans'],
    'port_sharing': ATTRS_VAL['booleans'],
    'edges': ATTRS_VAL['booleans'],
    'nodes': ATTRS_VAL['booleans'],
    'splines': ATTRS_VAL['booleans'],
    }
NODE_ATTRS = {
    'title': 0,
    'label': 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'bordercolor': ATTRS_VAL['colors'],
    'width': 1,
    'height': 1,
    'borderwidth': 1,
    'textmode': ATTRS_VAL['textmodes'],
    'shape': ATTRS_VAL['shapes'],
    'shrink': 1,
    'stretch': 1,
    'vertical_order': 1,
    'horizontal_order': 1,
    }
EDGE_ATTRS = {
    'sourcename': 0,
    'targetname': 0,
    'label': 0,
    'linestyle': ATTRS_VAL['linestyles'],
    'class': 1,
    'thickness': 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'arrowcolor': ATTRS_VAL['colors'],
    'backarrowcolor': ATTRS_VAL['colors'],
    'arrowsize': 1,
    'backarrowsize': 1,
    'arrowstyle': ATTRS_VAL['arrowstyles'],
    'backarrowstyle': ATTRS_VAL['arrowstyles'],
    'textmode': ATTRS_VAL['textmodes'],
    'priority': 1,
    'anchor': 1,
    'horizontal_order': 1,
    }
# Misc utilities ###############################################################
def latin_to_vcg(st):
    """Convert latin characters using vcg escape sequence.

    Every character outside [A-Za-z] whose code point is >= 192 (the
    accented Latin-1 range) is replaced by vcg's ``\\fi<decimal>`` escape;
    all other characters pass through unchanged.
    """
    # Build the result in a single pass instead of repeated str.replace()
    # calls (quadratic on long strings), and drop the old bare
    # 'except: pass' which could silently hide real errors.
    out = []
    for char in st:
        if char not in string.ascii_letters and ord(char) >= 192:
            out.append(r'\fi%d' % ord(char))
        else:
            out.append(char)
    return ''.join(out)
class VCGPrinter:
    """A vcg graph writer: emits graph/node/edge records to a stream."""

    def __init__(self, output_stream):
        self._stream = output_stream
        self._indent = ''

    def open_graph(self, **args):
        """open a vcg graph"""
        self._stream.write(self._indent + 'graph:{\n')
        self._inc_indent()
        self._write_attributes(GRAPH_ATTRS, **args)

    def close_graph(self):
        """close a vcg graph"""
        self._dec_indent()
        self._stream.write(self._indent + '}\n')

    def node(self, title, **args):
        """draw a node"""
        self._stream.write(self._indent + 'node: {title:"%s"' % title)
        self._write_attributes(NODE_ATTRS, **args)
        self._stream.write('}\n')

    def edge(self, from_node, to_node, edge_type='', **args):
        """draw an edge from a node to another."""
        header = '%s%sedge: {sourcename:"%s" targetname:"%s"' % (
            self._indent, edge_type, from_node, to_node)
        self._stream.write(header)
        self._write_attributes(EDGE_ATTRS, **args)
        self._stream.write('}\n')

    # private ##################################################################

    def _write_attributes(self, attributes_dict, **args):
        """write graph, node or edge attributes"""
        for key, value in args.items():
            try:
                expected = attributes_dict[key]
            except KeyError:
                raise Exception('''no such attribute %s
possible attributes are %s''' % (key, attributes_dict.keys()))
            # 0 -> quoted string, 1 -> int, tuple -> enumerated value
            if not expected:
                rendered = '%s:"%s"' % (key, value)
            elif expected == 1:
                rendered = '%s:%s' % (key, int(value))
            elif value in expected:
                rendered = '%s:%s' % (key, value)
            else:
                raise Exception('''value %s isn\'t correct for attribute %s
correct values are %s''' % (value, key, expected))
            self._stream.write('%s%s\n' % (self._indent, rendered))

    def _inc_indent(self):
        """increment indentation"""
        self._indent += '  '

    def _dec_indent(self):
        """decrement indentation"""
        self._indent = self._indent[:-2]
| bsd-3-clause |
HM2MC/Webfront | reportlab-2.5/tests/test_pdfbase_encodings.py | 7 | 10325 | from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation, NearTestCase
setOutDir(__name__)
import unittest
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
# Pattern matching one parenthesised PDF string literal, e.g. "(Hello)".
textPat = re.compile(r'\([^(]*\)')
#test sentences
# cp1252 bytes: copyright 0xA9, trademark 0x99, registered 0xAE, e-acute 0xE9
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')  # Python 2 'unicode' -- this module is py2-only
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
    """Utility to rip out the PDF text within a block of PDF operators.

    PDF shows a string draw as something like "(Hello World) Tj",
    i.e. text is in curved brackets.  Returns a list of the bracket
    contents, in order.  Crude and dirty, probably fails on escaped
    brackets.
    """
    # Same pattern as the module-level textPat, inlined so this helper is
    # self-contained.
    found = re.findall(r'\([^(]*\)', pdfOps)
    # Chop off '(' and ')'.  A list comprehension (rather than Python 2's
    # map()) keeps the result a real, indexable list under Python 3 too --
    # callers index the result (e.g. extracted[0]).
    return [text[1:-1] for text in found]
def subsetToUnicode(ttf, subsetCodeStr):
    """Return unicode string represented by given subsetCode string
    as found when TrueType font rendered to PDF, ttf must be the font
    object that was used."""
    # This relies on TTFont internals and uses the first document
    # and subset it finds
    # NOTE: Python 2 only (dict.values()[0] indexing and unichr()).
    subset = ttf.state.values()[0].subsets[0]
    chrs = []
    # Subset code strings are backslash-separated chunks; each non-empty
    # chunk's first character is skipped and the remainder parsed as an
    # octal index into the subset table -- assumed from the call site;
    # TODO confirm against TTFont's subset encoding.
    for codeStr in subsetCodeStr.split('\\'):
        if codeStr:
            chrs.append(unichr(subset[int(codeStr[1:], 8)]))
    return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
    """Tests of expected Unicode and encoding behaviour
    """
    def setUp(self):
        # Register a TrueType font alongside the built-in Type 1 fonts so
        # both measurement/drawing code paths are exercised.
        self.vera = TTFont("Vera", "Vera.ttf")
        pdfmetrics.registerFont(self.vera)
        self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
        self.styTrueType = ParagraphStyle(name='TrueType', fontName='Vera')

    def testStringWidth(self):
        # Widths for built-in fonts are known constants; Vera's width was
        # measured empirically.
        msg = 'Hello World'
        self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10),66.0)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10),51.67)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10),50.27)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Vera', 10),57.7685546875)
        # The same text as a unicode object must measure identically.
        uniMsg1 = u"Hello World"
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10),66.0)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10),51.67)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10),50.27)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Vera', 10),57.7685546875)
        # Courier are all 600 ems wide. So if one 'measures as utf8' one will
        # get a wrong width as extra characters are seen
        self.assertEquals(len(testCp1252),52)
        self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'),312.0)
        # the test string has 5 more bytes and so "measures too long" if passed to
        # a single-byte font which treats it as a single-byte string.
        self.assertEquals(len(testUTF8),57)
        self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10),312.0)
        self.assertEquals(len(testUni),52)
        self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10),312.0)
        # now try a TrueType font. Should be able to accept Unicode or UTF8
        self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Vera', 10),279.809570313)
        self.assertNear(pdfmetrics.stringWidth(testUni, 'Vera', 10),279.809570313)

    def testUtf8Canvas(self):
        """Verify canvas declared as utf8 autoconverts.
        This assumes utf8 input. It converts to the encoding of the
        underlying font, so both text lines APPEAR the same."""
        c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))
        c.drawString(100,700, testUTF8)
        # Set a font with UTF8 encoding
        c.setFont('Vera', 12)
        # This should pass the UTF8 through unchanged
        c.drawString(100,600, testUTF8)
        # and this should convert from Unicode to UTF8
        c.drawString(100,500, testUni)
        # now add a paragraph in Latin-1 in the latin-1 style
        p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
        w, h = p.wrap(150, 100)
        p.drawOn(c, 100, 400) #3
        c.rect(100,300,w,h)
        # now add a paragraph in UTF-8 in the UTF-8 style
        p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
        w, h = p2.wrap(150, 100)
        p2.drawOn(c, 300, 400) #4
        c.rect(100,300,w,h)
        # now add a paragraph in Unicode in the latin-1 style
        p3 = Paragraph(testUni, style=self.styNormal)
        w, h = p3.wrap(150, 100)
        p3.drawOn(c, 100, 300)
        c.rect(100,300,w,h)
        # now add a paragraph in Unicode in the UTF-8 style
        p4 = Paragraph(testUni, style=self.styTrueType)
        p4.wrap(150, 100)
        p4.drawOn(c, 300, 300)
        c.rect(300,300,w,h)
        # now a graphic
        d1 = Drawing(400,50)
        d1.add(Ellipse(200,25,200,12.5, fillColor=None))
        d1.add(String(200,25,testUTF8, textAnchor='middle', encoding='utf-8'))
        d1.drawOn(c, 100, 150)
        # now a graphic in utf8
        d2 = Drawing(400,50)
        d2.add(Ellipse(200,25,200,12.5, fillColor=None))
        d2.add(String(200,25,testUTF8, fontName='Vera', textAnchor='middle', encoding='utf-8'))
        d2.drawOn(c, 100, 100)
        # now a graphic in Unicode with T1 font
        d3 = Drawing(400,50)
        d3.add(Ellipse(200,25,200,12.5, fillColor=None))
        d3.add(String(200,25,testUni, textAnchor='middle'))
        d3.drawOn(c, 100, 50)
        # now a graphic in Unicode with TT font
        d4 = Drawing(400,50)
        d4.add(Ellipse(200,25,200,12.5, fillColor=None))
        d4.add(String(200,25,testUni, fontName='Vera', textAnchor='middle'))
        d4.drawOn(c, 100, 0)
        # Pull the raw text operands back out of the generated page and
        # check the single-byte and TrueType renderings agree.
        extracted = extractText(c.getCurrentPageContent())
        self.assertEquals(extracted[0], expectedCp1252)
        self.assertEquals(extracted[1], extracted[2])
        #self.assertEquals(subsetToUnicode(self.vera, extracted[1]), testUni)
        c.save()
class FontEncodingTestCase(unittest.TestCase):
    """Make documents with custom encodings of Type 1 built-in fonts.
    Nothing really to do with character encodings; this is about hacking the font itself"""

    def test0(self):
        "Make custom encodings of standard fonts"
        # make a custom encoded font.
        c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
        c.setPageCompression(0)
        c.setFont('Helvetica', 12)
        c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
        # invent a new language where vowels are replaced with letter 'z'
        zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
        for ch in 'aeiou':
            zenc[ord(ch)] = 'z'
        for ch in 'AEIOU':
            zenc[ord(ch)] = 'Z'
        pdfmetrics.registerEncoding(zenc)
        # now we can make a font based on this encoding
        # AR hack/workaround: the name of the encoding must be a Python codec!
        f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
        pdfmetrics.registerFont(f)
        c.setFont('FontWithoutVowels', 12)
        c.drawString(125, 675, "The magic word is squamish ossifrage")
        # now demonstrate adding a Euro to MacRoman, which lacks one
        c.setFont('Helvetica', 12)
        c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
        # WinAnsi Helvetica
        pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
        c.setFont('Helvetica-WinAnsi', 12)
        c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
        pdfmetrics.registerFont(pdfmetrics.Font('MacHelvNoEuro', 'Helvetica-Oblique', 'MacRomanEncoding'))
        c.setFont('MacHelvNoEuro', 12)
        c.drawString(125, 600, 'Standard MacRoman, no Euro: Character 219 = "\333"') # oct(219)=0333
        # now make our hacked encoding
        euroMac = pdfmetrics.Encoding('MacWithEuro', 'MacRomanEncoding')
        euroMac[219] = 'Euro'
        pdfmetrics.registerEncoding(euroMac)
        pdfmetrics.registerFont(pdfmetrics.Font('MacHelvWithEuro', 'Helvetica-Oblique', 'MacWithEuro'))
        c.setFont('MacHelvWithEuro', 12)
        c.drawString(125, 575, 'Hacked MacRoman with Euro: Character 219 = "\333"') # oct(219)=0333
        # now test width setting with and without _rl_accel - harder
        # make an encoding where 'm' becomes 'i'
        c.setFont('Helvetica', 12)
        c.drawString(100, 500, "Recode 'm' to 'i' and check we can measure widths. Boxes should surround letters.")
        sample = 'Mmmmm. ' * 6 + 'Mmmm'
        c.setFont('Helvetica-Oblique',12)
        c.drawString(125, 475, sample)
        w = c.stringWidth(sample, 'Helvetica-Oblique', 12)
        c.rect(125, 475, w, 12)
        # The narrowed encoding should measure narrower than the original.
        narrowEnc = pdfmetrics.Encoding('m-to-i')
        narrowEnc[ord('m')] = 'i'
        narrowEnc[ord('M')] = 'I'
        pdfmetrics.registerEncoding(narrowEnc)
        pdfmetrics.registerFont(pdfmetrics.Font('narrow', 'Helvetica-Oblique', 'm-to-i'))
        c.setFont('narrow', 12)
        c.drawString(125, 450, sample)
        w = c.stringWidth(sample, 'narrow', 12)
        c.rect(125, 450, w, 12)
        c.setFont('Helvetica', 12)
        c.drawString(100, 400, "Symbol & Dingbats fonts - check we still get valid PDF in StandardEncoding")
        c.setFont('Symbol', 12)
        c.drawString(100, 375, 'abcdefghijklmn')
        c.setFont('ZapfDingbats', 12)
        c.drawString(300, 375, 'abcdefghijklmn')
        c.save()
def makeSuite():
    """Build the unittest suite for this module."""
    # FontEncodingTestCase is nobbled for now due to old stuff which needs
    # removing.
    suite_classes = (TextEncodingTestCase,)
    return makeSuiteForClasses(*suite_classes)
#noruntests
if __name__ == "__main__":
    # Run the suite directly and report where output PDFs were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| mit |
byterom/android_external_chromium_org | chrome/common/extensions/docs/server2/content_providers.py | 41 | 7663 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import traceback
from chroot_file_system import ChrootFileSystem
from content_provider import ContentProvider
import environment
from extensions_paths import CONTENT_PROVIDERS, LOCAL_DEBUG_DIR
from future import Future
from gitiles_file_system import GitilesFileSystem
from local_file_system import LocalFileSystem
from third_party.json_schema_compiler.memoize import memoize
# Single-element list used as a mutable module-level flag, so nested
# decorated calls can save and restore the previous value.
_IGNORE_MISSING_CONTENT_PROVIDERS = [False]


def IgnoreMissingContentProviders(fn):
  '''Decorates |fn| to ignore missing content providers during its run.
  '''
  def run(*args, **optargs):
    # Remember the caller's flag value and restore it on the way out,
    # even if |fn| raises.
    previous_value = _IGNORE_MISSING_CONTENT_PROVIDERS[0]
    _IGNORE_MISSING_CONTENT_PROVIDERS[0] = True
    try:
      return fn(*args, **optargs)
    finally:
      _IGNORE_MISSING_CONTENT_PROVIDERS[0] = previous_value
  return run
class ContentProviders(object):
  '''Implements the content_providers.json configuration; see
  chrome/common/extensions/docs/templates/json/content_providers.json for its
  current state and a description of the format.

  Returns ContentProvider instances based on how they're configured there.
  '''

  def __init__(self,
               object_store_creator,
               compiled_fs_factory,
               host_file_system,
               github_file_system_provider,
               gcs_file_system_provider):
    self._object_store_creator = object_store_creator
    self._compiled_fs_factory = compiled_fs_factory
    self._host_file_system = host_file_system
    self._github_file_system_provider = github_file_system_provider
    self._gcs_file_system_provider = gcs_file_system_provider
    self._cache = None

    # If running the devserver and there is a LOCAL_DEBUG_DIR, we
    # will read the content_provider configuration from there instead
    # of fetching it from Gitiles or patch.
    if environment.IsDevServer() and os.path.exists(LOCAL_DEBUG_DIR):
      local_fs = LocalFileSystem(LOCAL_DEBUG_DIR)
      conf_stat = None
      try:
        conf_stat = local_fs.Stat(CONTENT_PROVIDERS)
      except:
        # Missing/unstat-able local config just means "fall through to the
        # host file system" below.
        pass
      if conf_stat:
        logging.warn(("Using local debug folder (%s) for "
                      "content_provider.json configuration") % LOCAL_DEBUG_DIR)
        self._cache = compiled_fs_factory.ForJson(local_fs)
    if not self._cache:
      self._cache = compiled_fs_factory.ForJson(host_file_system)

  @memoize
  def GetByName(self, name):
    '''Gets the ContentProvider keyed by |name| in content_providers.json, or
    None if there is no such content provider.
    '''
    config = self._GetConfig().get(name)
    if config is None:
      logging.error('No content provider found with name "%s"' % name)
      return None
    return self._CreateContentProvider(name, config)

  @memoize
  def GetByServeFrom(self, path):
    '''Gets a (content_provider, serve_from, path_in_content_provider) tuple,
    where content_provider is the ContentProvider with the longest "serveFrom"
    property that is a subpath of |path|, serve_from is that property, and
    path_in_content_provider is the remainder of |path|.

    For example, if content provider A serves from "foo" and content provider B
    serves from "foo/bar", GetByServeFrom("foo/bar/baz") will return (B,
    "foo/bar", "baz").

    Returns (None, '', |path|) if no ContentProvider serves from |path|.
    '''
    serve_from_to_config = dict(
        (config['serveFrom'], (name, config))
        for name, config in self._GetConfig().iteritems())
    path_parts = path.split('/')
    # Try the longest possible serveFrom prefix first, shrinking one path
    # component at a time.
    for i in xrange(len(path_parts), -1, -1):
      name_and_config = serve_from_to_config.get('/'.join(path_parts[:i]))
      if name_and_config is not None:
        return (self._CreateContentProvider(name_and_config[0],
                                            name_and_config[1]),
                '/'.join(path_parts[:i]),
                '/'.join(path_parts[i:]))
    return None, '', path

  def _GetConfig(self):
    '''Returns the parsed content_providers.json configuration dict.'''
    return self._cache.GetFromFile(CONTENT_PROVIDERS).Get()

  def _CreateContentProvider(self, name, config):
    '''Builds a ContentProvider called |name| from its |config| entry, or
    returns None (with an error logged) if the entry is malformed.
    '''
    default_extensions = config.get('defaultExtensions', ())
    supports_templates = config.get('supportsTemplates', False)
    supports_zip = config.get('supportsZip', False)

    if 'chromium' in config:
      chromium_config = config['chromium']
      if 'dir' not in chromium_config:
        logging.error('%s: "chromium" must have a "dir" property' % name)
        return None
      file_system = ChrootFileSystem(self._host_file_system,
                                     chromium_config['dir'])
    # TODO(rockot): Remove this in a future patch. It should not be needed once
    # the new content_providers.json is committed.
    elif 'gitiles' in config:
      chromium_config = config['gitiles']
      if 'dir' not in chromium_config:
        logging.error('%s: "chromium" must have a "dir" property' % name)
        return None
      file_system = ChrootFileSystem(self._host_file_system,
                                     chromium_config['dir'])
    elif 'gcs' in config:
      gcs_config = config['gcs']
      if 'bucket' not in gcs_config:
        logging.error('%s: "gcs" must have a "bucket" property' % name)
        return None
      bucket = gcs_config['bucket']
      if not bucket.startswith('gs://'):
        logging.error('%s: bucket %s should start with gs://' % (name, bucket))
        return None
      bucket = bucket[len('gs://'):]
      file_system = self._gcs_file_system_provider.Create(bucket)
      if 'dir' in gcs_config:
        file_system = ChrootFileSystem(file_system, gcs_config['dir'])
    elif 'github' in config:
      github_config = config['github']
      if 'owner' not in github_config or 'repo' not in github_config:
        logging.error('%s: "github" must provide an "owner" and "repo"' % name)
        return None
      file_system = self._github_file_system_provider.Create(
          github_config['owner'], github_config['repo'])
      if 'dir' in github_config:
        file_system = ChrootFileSystem(file_system, github_config['dir'])
    else:
      logging.error('%s: content provider type not supported' % name)
      return None

    return ContentProvider(name,
                           self._compiled_fs_factory,
                           file_system,
                           self._object_store_creator,
                           default_extensions=default_extensions,
                           supports_templates=supports_templates,
                           supports_zip=supports_zip)

  def GetRefreshPaths(self):
    '''Returns the names of all configured content providers.'''
    return self._GetConfig().keys()

  def Refresh(self, path):
    def safe(name, action, callback):
      '''Safely runs |callback| for a ContentProvider called |name| by
      swallowing exceptions and turning them into a None return value. It's
      important to run all ContentProvider Refreshes even if some of them fail.
      '''
      try:
        return callback()
      except:
        if not _IGNORE_MISSING_CONTENT_PROVIDERS[0]:
          logging.error('Error %s Refresh for ContentProvider "%s":\n%s' %
                        (action, name, traceback.format_exc()))
        return None

    config = self._GetConfig()[path]
    # Create the provider once and reuse it for the Refresh call; the
    # previous code constructed a second, identical provider and silently
    # discarded the first.
    provider = self._CreateContentProvider(path, config)
    future = safe(path,
                  'initializing',
                  provider.Refresh)
    if future is None:
      return Future(callback=lambda: True)
    return Future(callback=lambda: safe(path, 'resolving', future.Get))
| bsd-3-clause |
hornygranny/-tg-station | tools/ss13_genchangelog.py | 5 | 8506 | '''
Usage:
$ python ss13_genchangelog.py [--dry-run] html/changelog.html html/changelogs/
ss13_genchangelog.py - Generate changelog from YAML.
Copyright 2013 Rob "N3X15" Nelson <nexis@7chan.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from __future__ import print_function
import yaml, os, glob, sys, re, time, argparse
from datetime import datetime, date, timedelta
from time import time
today = date.today()
# strftime/strptime format used for every date in the HTML changelog.
dateformat = "%d %B %Y"

opt = argparse.ArgumentParser()
opt.add_argument('-d', '--dry-run', dest='dryRun', default=False, action='store_true', help='Only parse changelogs and, if needed, the targetFile. (A .dry_changelog.yml will be output for debugging purposes.)')
opt.add_argument('-t', '--time-period', dest='timePeriod', default=4, type=int, help='Define how many weeks back the changelog should display')
opt.add_argument('targetFile', help='The HTML changelog we wish to update.')
opt.add_argument('ymlDir', help='The directory of YAML changelogs we will use.')
args = opt.parse_args()

# Accumulated entries: {date -> {author -> [ {prefix: text}, ... ]}}.
all_changelog_entries = {}

# Allowed change-entry prefixes; each doubles as the CSS class of the
# rendered <li> in the HTML output.
validPrefixes = [
    'bugfix',
    'wip',
    'tweak',
    'soundadd',
    'sounddel',
    'rscdel',
    'rscadd',
    'imageadd',
    'imagedel',
    'spellcheck',
    'experiment',
    'tgs'
]
def dictToTuples(inp):
    """Return the (key, value) pairs of mapping *inp* as a list of tuples."""
    return list(inp.items())
changelog_cache = os.path.join(args.ymlDir, '.all_changelog.yml')

failed_cache_read = True
if os.path.isfile(changelog_cache):
    try:
        # Cache file is a two-document YAML stream: (header comment, entries).
        # NOTE(review): yaml.load_all without an explicit Loader is unsafe on
        # untrusted input; acceptable here since we wrote the cache ourselves.
        with open(changelog_cache) as f:
            (_, all_changelog_entries) = yaml.load_all(f)
        failed_cache_read = False
        # Convert old timestamps to newer format.
        new_entries = {}
        for _date in all_changelog_entries.keys():
            ty = type(_date).__name__
            # print(ty)
            if ty in ['str', 'unicode']:
                # Old caches keyed entries by formatted strings; re-key by
                # datetime.date objects.
                temp_data = all_changelog_entries[_date]
                _date = datetime.strptime(_date, dateformat).date()
                new_entries[_date] = temp_data
            else:
                new_entries[_date] = all_changelog_entries[_date]
        all_changelog_entries = new_entries
    except Exception as e:
        print("Failed to read cache:")
        print(e, file=sys.stderr)

if args.dryRun:
    # Keep dry runs from clobbering the real cache.
    changelog_cache = os.path.join(args.ymlDir, '.dry_changelog.yml')

if failed_cache_read and os.path.isfile(args.targetFile):
    # No usable cache: rebuild entries by scraping the existing HTML changelog.
    from bs4 import BeautifulSoup
    from bs4.element import NavigableString
    print(' Generating cache...')
    with open(args.targetFile, 'r') as f:
        soup = BeautifulSoup(f)
    for e in soup.find_all('div', {'class':'commit'}):
        entry = {}
        date = datetime.strptime(e.h2.string.strip(), dateformat).date() # key
        for authorT in e.find_all('h3', {'class':'author'}):
            author = authorT.string
            # Strip suffix
            if author.endswith('updated:'):
                author = author[:-8]
            author = author.strip()
            # Find <ul>
            ulT = authorT.next_sibling
            while(ulT.name != 'ul'):
                ulT = ulT.next_sibling
            changes = []
            for changeT in ulT.children:
                if changeT.name != 'li': continue
                val = changeT.decode_contents(formatter="html")
                newdat = {changeT['class'][0] + '': val + ''}
                if newdat not in changes:
                    changes += [newdat]
            if len(changes) > 0:
                entry[author] = changes
        if date in all_changelog_entries:
            all_changelog_entries[date].update(entry)
        else:
            all_changelog_entries[date] = entry
del_after = []
print('Reading changelogs...')
for fileName in glob.glob(os.path.join(args.ymlDir, "*.yml")):
    name, ext = os.path.splitext(os.path.basename(fileName))
    # Skip dotfiles (including our caches) and the template entry.
    if name.startswith('.'): continue
    if name == 'example': continue
    fileName = os.path.abspath(fileName)
    print(' Reading {}...'.format(fileName))
    cl = {}
    # NOTE(review): yaml.load without a Loader trusts the input file.
    with open(fileName, 'r') as f:
        cl = yaml.load(f)
        f.close()
    if today not in all_changelog_entries:
        all_changelog_entries[today] = {}
    author_entries = all_changelog_entries[today].get(cl['author'], [])
    if len(cl['changes']):
        new = 0
        for change in cl['changes']:
            if change not in author_entries:
                # Each change is a one-pair dict: {prefix: text}.
                (change_type, _) = dictToTuples(change)[0]
                if change_type not in validPrefixes:
                    # Warn but still record the entry.
                    print(' {0}: Invalid prefix {1}'.format(fileName, change_type), file=sys.stderr)
                author_entries += [change]
                new += 1
        all_changelog_entries[today][cl['author']] = author_entries
        if new > 0:
            print(' Added {0} new changelog entries.'.format(new))
    if cl.get('delete-after', False):
        if os.path.isfile(fileName):
            if args.dryRun:
                print(' Would delete {0} (delete-after set)...'.format(fileName))
            else:
                # Actual deletion is deferred until after the HTML is written.
                del_after += [fileName]
    if args.dryRun: continue
    # Consume the changes so they are not re-added on the next run.
    cl['changes'] = []
    with open(fileName, 'w') as f:
        yaml.dump(cl, f, default_flow_style=False)
targetDir = os.path.dirname(args.targetFile)
# Dry runs write to a sibling ".dry.htm" file instead of the real target.
with open(args.targetFile.replace('.htm', '.dry.htm') if args.dryRun else args.targetFile, 'w') as changelog:
    with open(os.path.join(targetDir, 'templates', 'header.html'), 'r') as h:
        for line in h:
            changelog.write(line)
    weekstoshow = timedelta(weeks=args.timePeriod)
    # Newest dates first; anything older than the configured window is dropped.
    for _date in reversed(sorted(all_changelog_entries.keys())):
        if not (today - _date < weekstoshow):
            continue
        entry_htm = '\n'
        entry_htm += '\t\t\t<h2 class="date">{date}</h2>\n'.format(date=_date.strftime(dateformat))
        write_entry = False
        for author in sorted(all_changelog_entries[_date].keys()):
            if len(all_changelog_entries[_date]) == 0: continue
            author_htm = '\t\t\t<h3 class="author">{author} updated:</h3>\n'.format(author=author)
            author_htm += '\t\t\t<ul class="changes bgimages16">\n'
            changes_added = []
            for (css_class, change) in (dictToTuples(e)[0] for e in all_changelog_entries[_date][author]):
                if change in changes_added: continue
                write_entry = True
                changes_added += [change]
                author_htm += '\t\t\t\t<li class="{css_class}">{change}</li>\n'.format(css_class=css_class, change=change.strip())
            author_htm += '\t\t\t</ul>\n'
            if len(changes_added) > 0:
                entry_htm += author_htm
        # Only emit the date header if at least one change was written.
        if write_entry:
            changelog.write(entry_htm)
    with open(os.path.join(targetDir, 'templates', 'footer.html'), 'r') as h:
        for line in h:
            changelog.write(line)

# Persist the merged entries as a two-document YAML stream (header, data).
with open(changelog_cache, 'w') as f:
    cache_head = 'DO NOT EDIT THIS FILE BY HAND! AUTOMATICALLY GENERATED BY ss13_genchangelog.py.'
    yaml.dump_all([cache_head, all_changelog_entries], f, default_flow_style=False)

if len(del_after):
    print('Cleaning up...')
    for fileName in del_after:
        if os.path.isfile(fileName):
            print(' Deleting {0} (delete-after set)...'.format(fileName))
            os.remove(fileName)
| agpl-3.0 |
opensourcechipspark/platform_external_chromium_org | native_client_sdk/src/build_tools/sdk_tools/config.py | 31 | 2138 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import urlparse
from sdk_update_common import Error
# URL prefixes that SDK update sources may come from; anything else is
# rejected by IsSourceValid().
SOURCE_WHITELIST = [
  'http://localhost/',  # For testing.
  'https://commondatastorage.googleapis.com/nativeclient-mirror/nacl/nacl_sdk',
]
def IsSourceValid(url):
  """Return True if |url| matches one of the whitelisted sources.

  A URL is accepted when its scheme and hostname equal a whitelist
  entry's and its path starts with that entry's path.
  """
  # E1101: Instance of 'ParseResult' has no 'scheme' member
  # pylint: disable=E1101
  given = urlparse.urlparse(url)

  def matches(allowed_url):
    allowed = urlparse.urlparse(allowed_url)
    return (given.scheme == allowed.scheme and
            given.hostname == allowed.hostname and
            given.path.startswith(allowed.path))

  return any(matches(allowed_url) for allowed_url in SOURCE_WHITELIST)
class Config(dict):
  """Dict-backed configuration with attribute-style access.

  Every key is readable/writable as an attribute; a |sources| list tracks
  the whitelisted update sources the SDK tools should consult.
  """

  def __init__(self, data=None):
    dict.__init__(self)
    if not data:
      # Fresh config: start with an empty source list.
      self.sources = []
    else:
      self.update(data)

  def LoadJson(self, json_data):
    """Merge the JSON object in |json_data| into this config."""
    try:
      self.update(json.loads(json_data))
    except Exception as e:
      raise Error('Error reading json config:\n%s' % str(e))

  def ToJson(self):
    """Serialize this config to an indented JSON string."""
    try:
      return json.dumps(self, sort_keys=False, indent=2)
    except Exception as e:
      raise Error('Json encoding error writing config:\n%s' % e)

  def __getattr__(self, name):
    # Attribute reads fall back to dict lookup.
    if name not in self:
      raise AttributeError('Config does not contain: %s' % name)
    return self[name]

  def __setattr__(self, name, value):
    # Attribute writes are stored as dict items.
    self[name] = value

  def AddSource(self, source):
    """Append |source| if it is whitelisted and not already present."""
    if not IsSourceValid(source):
      logging.warn('Only whitelisted sources are allowed. Ignoring \"%s\".' % (
          source,))
      return
    if source in self.sources:
      logging.info('Source \"%s\" already in Config.' % (source,))
      return
    self.sources.append(source)

  def RemoveSource(self, source):
    """Remove |source| from the source list if present."""
    if source not in self.sources:
      logging.warn('Source \"%s\" not in Config.' % (source,))
      return
    self.sources.remove(source)

  def RemoveAllSources(self):
    """Clear the source list."""
    if not self.sources:
      logging.info('No sources to remove.')
      return
    self.sources = []
| bsd-3-clause |
caslei/TfModels | im2txt/im2txt/ops/image_processing.py | 30 | 4940 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for image preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def distort_image(image, thread_id):
  """Perform random distortions on an image.

  Args:
    image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
    thread_id: Preprocessing thread id used to select the ordering of color
      distortions. There should be a multiple of 2 preprocessing threads.

  Returns:
    distorted_image: A float32 Tensor of shape [height, width, 3] with values in
      [0, 1].
  """
  # Randomly flip horizontally.
  with tf.name_scope("flip_horizontal", values=[image]):
    image = tf.image.random_flip_left_right(image)

  # Randomly distort the colors based on thread id.  The two orderings apply
  # the same four distortions, differing only in sequence, to decorrelate the
  # augmentation across preprocessing threads.
  color_ordering = thread_id % 2
  with tf.name_scope("distort_color", values=[image]):
    if color_ordering == 0:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.032)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif color_ordering == 1:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.032)

    # The random_* ops do not necessarily clamp.
    image = tf.clip_by_value(image, 0.0, 1.0)

  return image
def process_image(encoded_image,
                  is_training,
                  height,
                  width,
                  resize_height=346,
                  resize_width=346,
                  thread_id=0,
                  image_format="jpeg"):
  """Decode an image, resize and apply random distortions.

  In training, images are distorted slightly differently depending on thread_id.

  Args:
    encoded_image: String Tensor containing the image.
    is_training: Boolean; whether preprocessing for training or eval.
    height: Height of the output image.
    width: Width of the output image.
    resize_height: If > 0, resize height before crop to final dimensions.
    resize_width: If > 0, resize width before crop to final dimensions.
    thread_id: Preprocessing thread id used to select the ordering of color
      distortions. There should be a multiple of 2 preprocessing threads.
    image_format: "jpeg" or "png".

  Returns:
    A float32 Tensor of shape [height, width, 3] with values in [-1, 1].

  Raises:
    ValueError: If image_format is invalid.
  """
  # Helper function to log an image summary to the visualizer. Summaries are
  # only logged in thread 0.
  def image_summary(name, image):
    if not thread_id:
      tf.summary.image(name, tf.expand_dims(image, 0))

  # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).
  with tf.name_scope("decode", values=[encoded_image]):
    if image_format == "jpeg":
      image = tf.image.decode_jpeg(encoded_image, channels=3)
    elif image_format == "png":
      image = tf.image.decode_png(encoded_image, channels=3)
    else:
      raise ValueError("Invalid image format: %s" % image_format)
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  image_summary("original_image", image)

  # Resize image.  Both resize dimensions must be set or unset together.
  assert (resize_height > 0) == (resize_width > 0)
  if resize_height:
    image = tf.image.resize_images(image,
                                   size=[resize_height, resize_width],
                                   method=tf.image.ResizeMethod.BILINEAR)

  # Crop to final dimensions: random crop for training augmentation,
  # deterministic central crop for evaluation.
  if is_training:
    image = tf.random_crop(image, [height, width, 3])
  else:
    # Central crop, assuming resize_height > height, resize_width > width.
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)
  image_summary("resized_image", image)

  # Randomly distort the image.
  if is_training:
    image = distort_image(image, thread_id)
  image_summary("final_image", image)

  # Rescale to [-1,1] instead of [0, 1]
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)
  return image
| apache-2.0 |
epssy/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/admin/models.py | 104 | 3027 | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.util import quote
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.encoding import smart_text
from django.utils.encoding import python_2_unicode_compatible
# Action flag values stored in LogEntry.action_flag.
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
    def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
        """Create and save a LogEntry describing an admin action.

        NOTE(review): arguments are passed positionally to the model
        constructor (id and action_time given as None), so they must stay
        in field-declaration order; object_repr is truncated to the
        field's 200-char limit.
        """
        e = self.model(None, None, user_id, content_type_id, smart_text(object_id), object_repr[:200], action_flag, change_message)
        e.save()
@python_2_unicode_compatible
class LogEntry(models.Model):
    """One row in the admin history: who did what to which object, when."""
    # Timestamp of the action; auto_now makes it the save time.
    action_time = models.DateTimeField(_('action time'), auto_now=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # content_type/object_id identify the edited object; nullable because
    # the object (or its type) may have been deleted since.
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.TextField(_('object id'), blank=True, null=True)
    object_repr = models.CharField(_('object repr'), max_length=200)
    # One of ADDITION, CHANGE or DELETION.
    action_flag = models.PositiveSmallIntegerField(_('action flag'))
    change_message = models.TextField(_('change message'), blank=True)

    objects = LogEntryManager()

    class Meta:
        verbose_name = _('log entry')
        verbose_name_plural = _('log entries')
        db_table = 'django_admin_log'
        ordering = ('-action_time',)

    def __repr__(self):
        return smart_text(self.action_time)

    def __str__(self):
        if self.action_flag == ADDITION:
            return ugettext('Added "%(object)s".') % {'object': self.object_repr}
        elif self.action_flag == CHANGE:
            return ugettext('Changed "%(object)s" - %(changes)s') % {
                'object': self.object_repr,
                'changes': self.change_message,
            }
        elif self.action_flag == DELETION:
            return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}

        return ugettext('LogEntry Object')

    def is_addition(self):
        return self.action_flag == ADDITION

    def is_change(self):
        return self.action_flag == CHANGE

    def is_deletion(self):
        return self.action_flag == DELETION

    def get_edited_object(self):
        "Returns the edited object represented by this log entry"
        return self.content_type.get_object_for_this_type(pk=self.object_id)

    def get_admin_url(self):
        """
        Returns the admin URL to edit the object represented by this log entry.
        This is relative to the Django admin index page.
        """
        if self.content_type and self.object_id:
            url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
            try:
                return reverse(url_name, args=(quote(self.object_id),))
            except NoReverseMatch:
                # No matching admin route registered for this model.
                pass
        return None
| apache-2.0 |
ibamacsr/django_gis_states | setup.py | 2 | 1207 | from codecs import open as codecs_open
from setuptools import setup, find_packages
# Get the long description from the relevant file
# (codecs.open guarantees the README is decoded as UTF-8 on Python 2).
with codecs_open('README.rst', encoding='utf-8') as f:
 long_description = f.read()
# Package metadata; install_requires deliberately leaves versions unpinned.
setup(name='django-gis-states',
 version='0.1',
 description="""Django app that provides gis models to applications that
 need to deal with State and Cities""",
 long_description=long_description,
 # PyPI trove classifiers.
 classifiers=[
 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
 'Programming Language :: Python :: 3.4',
 'Topic :: Scientific/Engineering :: GIS',
 'Topic :: Utilities',
 'Framework :: Django',
 'Environment :: Web Environment',
 ],
 keywords=['gis', 'states', 'cities', 'django', 'geodjango'],
 author="Wille Marcel",
 author_email='wille@wille.blog.br',
 url='http://git.ibama.gov.br/csr/django-gis-states',
 license='GPLv3+',
 packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
 include_package_data=True,
 zip_safe=False,
 install_requires=[
 'django',
 'simplejson'
 ]
 )
| agpl-3.0 |
pekeler/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/shift_jisx0213.py | 816 | 1059 | #
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
 # Stateless encode/decode bound directly from the C codec object.
 encode = codec.encode
 decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
   codecs.IncrementalEncoder):
 # The multibyte machinery reads the `codec` class attribute to find the C codec.
 codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
   codecs.IncrementalDecoder):
 # The multibyte machinery reads the `codec` class attribute to find the C codec.
 codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
 # Stream variant: combines the stateless Codec with the multibyte stream reader.
 codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
 # Stream variant: combines the stateless Codec with the multibyte stream writer.
 codec = codec
def getregentry():
 """Return the CodecInfo record that the codecs registry uses for shift_jisx0213."""
 entry = dict(
  name='shift_jisx0213',
  encode=Codec().encode,
  decode=Codec().decode,
  incrementalencoder=IncrementalEncoder,
  incrementaldecoder=IncrementalDecoder,
  streamreader=StreamReader,
  streamwriter=StreamWriter,
 )
 return codecs.CodecInfo(**entry)
| apache-2.0 |
daavery/audacity | lib-src/lv2/lv2/plugins/eg-midigate.lv2/waflib/Configure.py | 181 | 9880 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,sys,time
from waflib import ConfigSet,Utils,Options,Logs,Context,Build,Errors
try:
from urllib import request
except ImportError:
from urllib import urlopen
else:
urlopen=request.urlopen
BREAK='break'
CONTINUE='continue'
WAF_CONFIG_LOG='config.log'
autoconfig=False
conf_template='''# project %(app)s configured on %(now)s by
# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s)
# using %(args)s
#'''
def download_check(node):
 # Hook: verify a downloaded waf tool file (e.g. checksum/signature).
 # The default accepts everything; packagers may override it.
 pass
def download_tool(tool,force=False,ctx=None):
 """Download the waf tool *tool* into waflib/extras and import it.

 Tries every remote repo/location pair in turn; raises Errors.WafError
 when no candidate can be fetched and imported.
 """
 for x in Utils.to_list(Context.remote_repo):
  for sub in Utils.to_list(Context.remote_locs):
   url='/'.join((x,sub,tool+'.py'))
   try:
    web=urlopen(url)
    try:
     # Only accept a real 200; some handlers lack getcode() (AttributeError).
     if web.getcode()!=200:
      continue
    except AttributeError:
     pass
   except Exception:
    # Network/URL failure: try the next location.
    continue
   else:
    tmp=ctx.root.make_node(os.sep.join((Context.waf_dir,'waflib','extras',tool+'.py')))
    tmp.write(web.read(),'wb')
    Logs.warn('Downloaded %s from %s'%(tool,url))
    download_check(tmp)
    try:
     module=Context.load_tool(tool)
    except Exception:
     # Downloaded but does not import cleanly: remove it and keep looking.
     Logs.warn('The tool %s from %s is unusable'%(tool,url))
     try:
      tmp.delete()
     except Exception:
      pass
     continue
    return module
 raise Errors.WafError('Could not load the Waf tool')
class ConfigurationContext(Context.Context):
'''configures the project'''
cmd='configure'
error_handlers=[]
def __init__(self,**kw):
super(ConfigurationContext,self).__init__(**kw)
self.environ=dict(os.environ)
self.all_envs={}
self.top_dir=None
self.out_dir=None
self.tools=[]
self.hash=0
self.files=[]
self.tool_cache=[]
self.setenv('')
def setenv(self,name,env=None):
if name not in self.all_envs or env:
if not env:
env=ConfigSet.ConfigSet()
self.prepare_env(env)
else:
env=env.derive()
self.all_envs[name]=env
self.variant=name
def get_env(self):
return self.all_envs[self.variant]
def set_env(self,val):
self.all_envs[self.variant]=val
env=property(get_env,set_env)
def init_dirs(self):
top=self.top_dir
if not top:
top=Options.options.top
if not top:
top=getattr(Context.g_module,Context.TOP,None)
if not top:
top=self.path.abspath()
top=os.path.abspath(top)
self.srcnode=(os.path.isabs(top)and self.root or self.path).find_dir(top)
assert(self.srcnode)
out=self.out_dir
if not out:
out=Options.options.out
if not out:
out=getattr(Context.g_module,Context.OUT,None)
if not out:
out=Options.lockfile.replace('.lock-waf_%s_'%sys.platform,'').replace('.lock-waf','')
self.bldnode=(os.path.isabs(out)and self.root or self.path).make_node(out)
self.bldnode.mkdir()
if not os.path.isdir(self.bldnode.abspath()):
conf.fatal('Could not create the build directory %s'%self.bldnode.abspath())
def execute(self):
self.init_dirs()
self.cachedir=self.bldnode.make_node(Build.CACHE_DIR)
self.cachedir.mkdir()
path=os.path.join(self.bldnode.abspath(),WAF_CONFIG_LOG)
self.logger=Logs.make_logger(path,'cfg')
app=getattr(Context.g_module,'APPNAME','')
if app:
ver=getattr(Context.g_module,'VERSION','')
if ver:
app="%s (%s)"%(app,ver)
now=time.ctime()
pyver=sys.hexversion
systype=sys.platform
args=" ".join(sys.argv)
wafver=Context.WAFVERSION
abi=Context.ABI
self.to_log(conf_template%vars())
self.msg('Setting top to',self.srcnode.abspath())
self.msg('Setting out to',self.bldnode.abspath())
if id(self.srcnode)==id(self.bldnode):
Logs.warn('Setting top == out (remember to use "update_outputs")')
elif id(self.path)!=id(self.srcnode):
if self.srcnode.is_child_of(self.path):
Logs.warn('Are you certain that you do not want to set top="." ?')
super(ConfigurationContext,self).execute()
self.store()
Context.top_dir=self.srcnode.abspath()
Context.out_dir=self.bldnode.abspath()
env=ConfigSet.ConfigSet()
env['argv']=sys.argv
env['options']=Options.options.__dict__
env.run_dir=Context.run_dir
env.top_dir=Context.top_dir
env.out_dir=Context.out_dir
env['hash']=self.hash
env['files']=self.files
env['environ']=dict(self.environ)
if not self.env.NO_LOCK_IN_RUN:
env.store(Context.run_dir+os.sep+Options.lockfile)
if not self.env.NO_LOCK_IN_TOP:
env.store(Context.top_dir+os.sep+Options.lockfile)
if not self.env.NO_LOCK_IN_OUT:
env.store(Context.out_dir+os.sep+Options.lockfile)
def prepare_env(self,env):
if not env.PREFIX:
if Options.options.prefix or Utils.is_win32:
env.PREFIX=os.path.abspath(os.path.expanduser(Options.options.prefix))
else:
env.PREFIX=''
if not env.BINDIR:
env.BINDIR=Utils.subst_vars('${PREFIX}/bin',env)
if not env.LIBDIR:
env.LIBDIR=Utils.subst_vars('${PREFIX}/lib',env)
def store(self):
n=self.cachedir.make_node('build.config.py')
n.write('version = 0x%x\ntools = %r\n'%(Context.HEXVERSION,self.tools))
if not self.all_envs:
self.fatal('nothing to store in the configuration context!')
for key in self.all_envs:
tmpenv=self.all_envs[key]
tmpenv.store(os.path.join(self.cachedir.abspath(),key+Build.CACHE_SUFFIX))
def load(self,input,tooldir=None,funs=None,download=True):
tools=Utils.to_list(input)
if tooldir:tooldir=Utils.to_list(tooldir)
for tool in tools:
mag=(tool,id(self.env),funs)
if mag in self.tool_cache:
self.to_log('(tool %s is already loaded, skipping)'%tool)
continue
self.tool_cache.append(mag)
module=None
try:
module=Context.load_tool(tool,tooldir)
except ImportError ,e:
if Options.options.download:
module=download_tool(tool,ctx=self)
if not module:
self.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s'%(tool,sys.path,e))
else:
self.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s'%(tool,sys.path,e))
except Exception ,e:
self.to_log('imp %r (%r & %r)'%(tool,tooldir,funs))
self.to_log(Utils.ex_stack())
raise
if funs is not None:
self.eval_rules(funs)
else:
func=getattr(module,'configure',None)
if func:
if type(func)is type(Utils.readf):func(self)
else:self.eval_rules(func)
self.tools.append({'tool':tool,'tooldir':tooldir,'funs':funs})
def post_recurse(self,node):
super(ConfigurationContext,self).post_recurse(node)
self.hash=Utils.h_list((self.hash,node.read('rb')))
self.files.append(node.abspath())
def eval_rules(self,rules):
self.rules=Utils.to_list(rules)
for x in self.rules:
f=getattr(self,x)
if not f:self.fatal("No such method '%s'."%x)
try:
f()
except Exception ,e:
ret=self.err_handler(x,e)
if ret==BREAK:
break
elif ret==CONTINUE:
continue
else:
raise
def err_handler(self,fun,error):
pass
def conf(f):
 """Decorator: install *f* as a method on both ConfigurationContext and BuildContext.

 The installed wrapper honours a `mandatory` keyword (default True); when
 it is false, Errors.ConfigurationError raised by *f* is swallowed.
 """
 def fun(*k,**kw):
  mandatory=kw.pop('mandatory',True)
  try:
   return f(*k,**kw)
  except Errors.ConfigurationError:
   if mandatory:
    raise
 setattr(ConfigurationContext,f.__name__,fun)
 setattr(Build.BuildContext,f.__name__,fun)
 return f
@conf
def add_os_flags(self,var,dest=None):
 """Append the flags found in the environment variable *var* to env[dest or var]."""
 try:
  flags=shlex.split(self.environ[var])
 except KeyError:
  # Variable not set: nothing to do.
  return
 self.env.append_value(dest or var,flags)
@conf
def cmd_to_list(self,cmd):
 """Return *cmd* as an argument list.

 A string naming an existing file is wrapped as a one-element list (it may
 legitimately contain spaces); any other string is tokenized with shlex.
 Non-string values (already lists) pass through unchanged.

 Bugfix: the original guard was `cmd.find(' ')`, whose return value of -1
 (no space found) is truthy while 0 (leading space) is falsy, so a command
 string starting with a space was returned unsplit as a plain string.
 """
 if isinstance(cmd,str):
  try:
   os.stat(cmd)
  except OSError:
   return shlex.split(cmd)
  else:
   # The whole string names an existing file (possibly containing spaces).
   return [cmd]
 return cmd
@conf
def check_waf_version(self,mini='1.6.99',maxi='1.8.0'):
 """Fail the configuration unless mini <= Context.HEXVERSION <= maxi."""
 self.start_msg('Checking for waf version in %s-%s'%(str(mini),str(maxi)))
 cur=Context.HEXVERSION
 low=Utils.num2ver(mini)
 high=Utils.num2ver(maxi)
 if low>cur:
  self.fatal('waf version should be at least %r (%r found)'%(low,cur))
 if high<cur:
  self.fatal('waf version should be at most %r (%r found)'%(high,cur))
 self.end_msg('ok')
@conf
def find_file(self,filename,path_list=[]):
 """Return the first existing path built from *path_list* x *filename*; fatal when none exists."""
 for name in Utils.to_list(filename):
  for folder in Utils.to_list(path_list):
   candidate=os.path.join(folder,name)
   if os.path.exists(candidate):
    return candidate
 self.fatal('Could not find %r'%filename)
@conf
def find_program(self,filename,**kw):
 """Search for a program named *filename* (string or list) and cache it in env[var].

 Lookup order: existing env[var], the process environment, then every
 extension/name/path combination, finally the Windows registry App Paths.
 Calls self.fatal when nothing is found.
 """
 exts=kw.get('exts',Utils.is_win32 and'.exe,.com,.bat,.cmd'or',.sh,.pl,.py')
 environ=kw.get('environ',os.environ)
 ret=''
 filename=Utils.to_list(filename)
 var=kw.get('var','')
 if not var:
  var=filename[0].upper()
 if self.env[var]:
  ret=self.env[var]
 elif var in environ:
  ret=environ[var]
 path_list=kw.get('path_list','')
 if not ret:
  if path_list:
   path_list=Utils.to_list(path_list)
  else:
   path_list=environ.get('PATH','').split(os.pathsep)
  if not isinstance(filename,list):
   filename=[filename]
  # Try every extension/name/directory combination until one names a file.
  for a in exts.split(','):
   if ret:
    break
   for b in filename:
    if ret:
     break
    for c in path_list:
     if ret:
      break
     x=os.path.expanduser(os.path.join(c,b+a))
     if os.path.isfile(x):
      ret=x
 # Last resort on Windows: look up the registry App Paths keys.
 if not ret and Utils.winreg:
  ret=Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER,filename)
 if not ret and Utils.winreg:
  ret=Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE,filename)
 self.msg('Checking for program '+','.join(filename),ret or False)
 self.to_log('find program=%r paths=%r var=%r -> %r'%(filename,path_list,var,ret))
 if not ret:
  self.fatal(kw.get('errmsg','')or'Could not find the program %s'%','.join(filename))
 if var:
  self.env[var]=ret
 return ret
@conf
def find_perl_program(self,filename,path_list=[],var=None,environ=None,exts=''):
 """Find a perl script, either directly executable or run through the perl interpreter."""
 try:
  app=self.find_program(filename,path_list=path_list,var=var,environ=environ,exts=exts)
 except Exception:
  # Not directly executable: locate perl, then the script itself on PATH,
  # and store "perl <script>" as the command for *var*.
  self.find_program('perl',var='PERL')
  app=self.find_file(filename,os.environ['PATH'].split(os.pathsep))
  if not app:
   raise
  if var:
   self.env[var]=Utils.to_list(self.env['PERL'])+[app]
 self.msg('Checking for %r'%filename,app)
| gpl-2.0 |
wunderlins/learning | python/zodb/lib/osx/zodbpickle/tests/pickletester_2.py | 2 | 47879 | import io
import unittest
import StringIO
import cStringIO
import copy_reg
import sys
try:
 from unittest import skipIf
except ImportError:
 # Fallback for Python < 2.7, where unittest lacks skipIf.
 def skipIf(condition, message):
  """Skip the decorated test when *condition* is true (unittest.skipIf contract).

  Bugfix: the original fallback had the branches inverted -- it returned
  the test unchanged when the condition was true (running it) and
  replaced it with a no-op otherwise.
  """
  def _decorator(fn):
   if condition:
    # Replace the test with a no-op carrying an explanatory docstring.
    def skipped(self):
     pass
    skipped.__doc__ = '%s skipped: %s' % (fn.__name__, message)
    return skipped
   else:
    return fn
  return _decorator
from test.test_support import TestFailed, have_unicode, TESTFN
try:
 from test.test_support import _2G, _1M, precisionbigmemtest
except ImportError:
 # this import might fail when run on older Python versions by test_xpickle
 _2G = _1M = 0
 def precisionbigmemtest(*args, **kwargs):
  # Stand-in decorator: turns the big-memory test into a no-op.
  return lambda self: None
from . import _is_pypy
from . import _is_pure
from zodbpickle import pickle_2 as pickle
try:
from zodbpickle import _pickle as cPickle
has_c_implementation = not _is_pypy and not _is_pure
except ImportError:
cPickle = pickle
has_c_implementation = False
from zodbpickle import pickletools_2 as pickletools
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
assert pickle.HIGHEST_PROTOCOL == cPickle.HIGHEST_PROTOCOL == 3
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Copy of test.test_support.run_with_locale. This is needed to support Python
# 2.4, which didn't include it. This is all to support test_xpickle, which
# bounces pickled objects through older Python versions to test backwards
# compatibility.
def run_with_locale(catstr, *locales):
 """Decorator: run the wrapped test under the first of *locales* that can be installed.

 Copy of test.test_support.run_with_locale (absent in Python 2.4).
 *catstr* names a locale category attribute, e.g. 'LC_ALL'; the original
 locale is restored after the call.
 """
 def decorator(func):
  def inner(*args, **kwds):
   try:
    import locale
    category = getattr(locale, catstr)
    orig_locale = locale.setlocale(category)
   except AttributeError:
    # if the test author gives us an invalid category string
    raise
   except:
    # cannot retrieve original locale, so do nothing
    locale = orig_locale = None
   else:
    # Try each candidate locale until one can be installed.
    for loc in locales:
     try:
      locale.setlocale(category, loc)
      break
     except:
      pass
   # now run the function, resetting the locale on exceptions
   try:
    return func(*args, **kwds)
   finally:
    if locale and orig_locale:
     locale.setlocale(category, orig_locale)
  # Python 2 function attribute; preserves the wrapped test's name/doc.
  inner.func_name = func.func_name
  inner.__doc__ = func.__doc__
  return inner
 return decorator
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
 """Return True if opcode *code* occurs anywhere in the pickle bytestring."""
 return any(op.code == code for op, _arg, _pos in pickletools.genops(pickle))
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
 """Return how many times opcode *code* occurs in the pickle bytestring."""
 return sum(1 for op, _arg, _pos in pickletools.genops(pickle)
            if op.code == code)
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
 """Capture and temporarily remove a copy_reg extension registration.

 Construction saves (and removes) whatever is registered for *code* so a
 test can reuse that slot; restore() puts the original entry back.
 """
 def __init__(self, code):
  self.code = code
  pair = copy_reg._inverted_registry.get(code)
  self.pair = pair
  if pair is not None:
   copy_reg.remove_extension(pair[0], pair[1], code)
 def restore(self):
  """Undo __init__: drop any current registration and re-install the saved one."""
  code = self.code
  current = copy_reg._inverted_registry.get(code)
  if current is not None:
   copy_reg.remove_extension(current[0], current[1], code)
  if self.pair is not None:
   copy_reg.add_extension(self.pair[0], self.pair[1], code)
class C:
 # Sample old-style class: instances compare equal when their attribute
 # dicts do (Python 2 __cmp__ protocol).
 def __cmp__(self, other):
  return cmp(self.__dict__, other.__dict__)
import __main__
__main__.C = C
# Pretend C lives in __main__ so pickles reference a stable module path.
C.__module__ = "__main__"
class myint(int):
 # int subclass that also stores its text form; exercises pickling of
 # builtin subclasses with extra state.
 def __init__(self, x):
  self.str = str(x)
class initarg(C):
 # Exercises the legacy __getinitargs__ pickling protocol.
 def __init__(self, a, b):
  self.a = a
  self.b = b
 def __getinitargs__(self):
  # Values re-passed positionally to __init__ on unpickling.
  return self.a, self.b
class metaclass(type):
 # Trivial metaclass: checks that instances of classes with a custom
 # metaclass survive pickling.
 pass
class use_metaclass(object):
 # Python 2 style metaclass declaration.
 __metaclass__ = metaclass
class pickling_metaclass(type):
 # Metaclass whose classes pickle by re-creating themselves through
 # create_dynamic_class (see __reduce__).
 def __eq__(self, other):
  return (type(self) == type(other) and
    self.reduce_args == other.reduce_args)
 def __reduce__(self):
  return (create_dynamic_class, self.reduce_args)
 # Defining __eq__ removes inherited hashability; mark unhashable explicitly.
 __hash__ = None
def create_dynamic_class(name, bases):
 """Build a fresh class via pickling_metaclass; such classes pickle by reconstruction."""
 cls = pickling_metaclass(name, bases, {})
 cls.reduce_args = (name, bases)
 return cls
# DATA0 .. DATA2 are the pickles we expect under the various protocols, for
# the object returned by create_data().
# break into multiple strings to avoid confusing font-lock-mode
DATA0 = """(lp1
I0
aL1L
aF2
ac__builtin__
complex
p2
""" + \
"""(F3
F0
tRp3
aI1
aI-1
aI255
aI-255
aI-256
aI65535
aI-65535
aI-65536
aI2147483647
aI-2147483647
aI-2147483648
a""" + \
"""(S'abc'
p4
g4
""" + \
"""(i__main__
C
p5
""" + \
"""(dp6
S'foo'
p7
I1
sS'bar'
p8
I2
sbg5
tp9
ag9
aI5
a.
"""
# Disassembly of DATA0.
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 1
5: I INT 0
8: a APPEND
9: L LONG 1L
13: a APPEND
14: F FLOAT 2.0
17: a APPEND
18: c GLOBAL '__builtin__ complex'
39: p PUT 2
42: ( MARK
43: F FLOAT 3.0
46: F FLOAT 0.0
49: t TUPLE (MARK at 42)
50: R REDUCE
51: p PUT 3
54: a APPEND
55: I INT 1
58: a APPEND
59: I INT -1
63: a APPEND
64: I INT 255
69: a APPEND
70: I INT -255
76: a APPEND
77: I INT -256
83: a APPEND
84: I INT 65535
91: a APPEND
92: I INT -65535
100: a APPEND
101: I INT -65536
109: a APPEND
110: I INT 2147483647
122: a APPEND
123: I INT -2147483647
136: a APPEND
137: I INT -2147483648
150: a APPEND
151: ( MARK
152: S STRING 'abc'
159: p PUT 4
162: g GET 4
165: ( MARK
166: i INST '__main__ C' (MARK at 165)
178: p PUT 5
181: ( MARK
182: d DICT (MARK at 181)
183: p PUT 6
186: S STRING 'foo'
193: p PUT 7
196: I INT 1
199: s SETITEM
200: S STRING 'bar'
207: p PUT 8
210: I INT 2
213: s SETITEM
214: b BUILD
215: g GET 5
218: t TUPLE (MARK at 151)
219: p PUT 9
222: a APPEND
223: g GET 9
226: a APPEND
227: I INT 5
230: a APPEND
231: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (']q\x01(K\x00L1L\nG@\x00\x00\x00\x00\x00\x00\x00'
'c__builtin__\ncomplex\nq\x02(G@\x08\x00\x00\x00\x00\x00'
'\x00G\x00\x00\x00\x00\x00\x00\x00\x00tRq\x03K\x01J\xff\xff'
'\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xff'
'J\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00'
'\x00\x80J\x00\x00\x00\x80(U\x03abcq\x04h\x04(c__main__\n'
'C\nq\x05oq\x06}q\x07(U\x03fooq\x08K\x01U\x03barq\tK\x02ubh'
'\x06tq\nh\nK\x05e.'
)
# Disassembly of DATA1.
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 1
3: ( MARK
4: K BININT1 0
6: L LONG 1L
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 2
42: ( MARK
43: G BINFLOAT 3.0
52: G BINFLOAT 0.0
61: t TUPLE (MARK at 42)
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: U SHORT_BINSTRING 'abc'
118: q BINPUT 4
120: h BINGET 4
122: ( MARK
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: o OBJ (MARK at 122)
138: q BINPUT 6
140: } EMPTY_DICT
141: q BINPUT 7
143: ( MARK
144: U SHORT_BINSTRING 'foo'
149: q BINPUT 8
151: K BININT1 1
153: U SHORT_BINSTRING 'bar'
158: q BINPUT 9
160: K BININT1 2
162: u SETITEMS (MARK at 143)
163: b BUILD
164: h BINGET 6
166: t TUPLE (MARK at 112)
167: q BINPUT 10
169: h BINGET 10
171: K BININT1 5
173: e APPENDS (MARK at 3)
174: . STOP
highest protocol among opcodes = 1
"""
DATA2 = ('\x80\x02]q\x01(K\x00\x8a\x01\x01G@\x00\x00\x00\x00\x00\x00\x00'
'c__builtin__\ncomplex\nq\x02G@\x08\x00\x00\x00\x00\x00\x00G\x00'
'\x00\x00\x00\x00\x00\x00\x00\x86Rq\x03K\x01J\xff\xff\xff\xffK'
'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xff'
'J\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00'
'\x80(U\x03abcq\x04h\x04(c__main__\nC\nq\x05oq\x06}q\x07(U\x03foo'
'q\x08K\x01U\x03barq\tK\x02ubh\x06tq\nh\nK\x05e.')
# Disassembly of DATA2.
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 1
5: ( MARK
6: K BININT1 0
8: \x8a LONG1 1L
11: G BINFLOAT 2.0
20: c GLOBAL '__builtin__ complex'
41: q BINPUT 2
43: G BINFLOAT 3.0
52: G BINFLOAT 0.0
61: \x86 TUPLE2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: U SHORT_BINSTRING 'abc'
118: q BINPUT 4
120: h BINGET 4
122: ( MARK
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: o OBJ (MARK at 122)
138: q BINPUT 6
140: } EMPTY_DICT
141: q BINPUT 7
143: ( MARK
144: U SHORT_BINSTRING 'foo'
149: q BINPUT 8
151: K BININT1 1
153: U SHORT_BINSTRING 'bar'
158: q BINPUT 9
160: K BININT1 2
162: u SETITEMS (MARK at 143)
163: b BUILD
164: h BINGET 6
166: t TUPLE (MARK at 112)
167: q BINPUT 10
169: h BINGET 10
171: K BININT1 5
173: e APPENDS (MARK at 5)
174: . STOP
highest protocol among opcodes = 2
"""
def create_data():
 """Build the canonical test object graph matched by the DATA0..DATA2 pickles."""
 c = C()
 c.foo = 1
 c.bar = 2
 # Mix of int, long, float and complex (Python 2 literals).
 x = [0, 1L, 2.0, 3.0+0j]
 # Append some integer test cases at cPickle.c's internal size
 # cutoffs.
 uint1max = 0xff
 uint2max = 0xffff
 int4max = 0x7fffffff
 x.extend([1, -1,
    uint1max, -uint1max, -uint1max-1,
    uint2max, -uint2max, -uint2max-1,
    int4max, -int4max, -int4max-1])
 # The same tuple (with repeated members) appears twice so memo/identity
 # handling is exercised.
 y = ('abc', 'abc', c, c)
 x.append(y)
 x.append(y)
 x.append(5)
 return x
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads, self.error.
_testdata = create_data()
def setUp(self):
pass
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assertEqual(expected, got)
def test_load_from_canned_string(self):
expected = self._testdata
for canned in DATA0, DATA1, DATA2:
got = self.loads(canned)
self.assertEqual(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = cStringIO.StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertTrue(x is x[0])
def test_recursive_tuple(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(len(x[0]), 1)
self.assertTrue(x is x[0][0])
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertEqual(x.keys(), [1])
self.assertTrue(x[1] is x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(x[0].attr.keys(), [1])
self.assertTrue(x[0].attr[1] is x)
def test_garyp(self):
self.assertRaises(self.error, self.loads, 'garyp')
def test_insecure_strings(self):
insecure = ["abc", "2 + 2", # not quoted
#"'abc' + 'def'", # not a single quoted string
"'abc", # quote is not closed
"'abc\"", # open quote and close quote don't match
"'abc' ?", # junk after close quote
"'\\'", # trailing backslash
"'", # issue #17710
"' ", # issue #17710
# some tests of the quoting rules
#"'abc\"\''",
#"'\\\\a\'\'\'\\\'\\\\\''",
]
for s in insecure:
buf = "S" + s + "\012p0\012."
self.assertRaises(ValueError, self.loads, buf)
if have_unicode:
def test_unicode(self):
endcases = [u'', u'<\\u>', u'<\\\u1234>', u'<\n>',
u'<\\>', u'<\\\U00012345>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assertEqual(u2, u)
def test_unicode_high_plane(self):
t = u'\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assertEqual(t2, t)
def test_ints(self):
import sys
for proto in protocols:
n = sys.maxint
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assertEqual(expected, n2)
n = n >> 1
def test_maxint64(self):
maxint64 = (1L << 63) - 1
data = 'I' + str(maxint64) + '\n.'
got = self.loads(data)
self.assertEqual(got, maxint64)
# Try too with a bogus literal.
data = 'I' + str(maxint64) + 'JUNK\n.'
self.assertRaises(ValueError, self.loads, data)
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1L << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assertEqual(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = long("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertEqual(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent
self.assertEqual(self.dumps(1.2)[0:3], 'F1.')
def test_reduce(self):
pass
def test_getinitargs(self):
pass
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copy_reg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
# Tests for protocol 2
def test_proto(self):
build_none = pickle.NONE + pickle.STOP
for proto in protocols:
expected = build_none
if proto >= 2:
expected = pickle.PROTO + chr(proto) + expected
p = self.dumps(None, proto)
self.assertEqual(p, expected)
oob = protocols[-1] + 1 # a future protocol
badpickle = pickle.PROTO + chr(oob) + build_none
try:
self.loads(badpickle)
except ValueError, detail:
self.assertTrue(str(detail).startswith(
"unsupported pickle protocol"))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920L
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920L << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_shortbinbytes(self):
from zodbpickle import binary
x = binary(b'\x00ABC\x80')
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.SHORT_BINBYTES, s),
proto >= 3, str(self.__class__))
def test_binbytes(self):
from zodbpickle import binary
x = binary(b'\x00ABC\x80' * 100)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.BINBYTES, s),
proto >= 3, str(self.__class__))
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y, (proto, x, s, y))
expected = expected_opcode[proto, len(x)]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[proto, x]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(tuple(x), tuple(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
# Register a type with copy_reg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copy_reg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__, s1)
self.assertIn("MyList", s1)
self.assertEqual(opcode_in_pickle(opcode, s1), False)
y = self.loads(s1)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__, s2)
self.assertNotIn("MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True)
y = self.loads(s2)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = range(n)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = range(n)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
    def test_dict_chunking(self):
        """Same batching property as test_list_chunking, but for dicts and
        the SETITEMS opcode."""
        n = 10 # too small to chunk
        x = dict.fromkeys(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assertEqual(x, y)
            num_setitems = count_opcode(pickle.SETITEMS, s)
            # Exactly one SETITEMS for binary protocols, zero for proto 0.
            self.assertEqual(num_setitems, proto > 0)
        n = 2500 # expect at least two chunks when proto > 0
        x = dict.fromkeys(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assertEqual(x, y)
            num_setitems = count_opcode(pickle.SETITEMS, s)
            if proto == 0:
                self.assertEqual(num_setitems, 0)
            else:
                self.assertTrue(num_setitems >= 2)
    def test_simple_newobj(self):
        """Protocol >= 2 must use NEWOBJ and never call __init__ when
        reconstructing (SimpleNewObj.__init__ raises if invoked)."""
        x = object.__new__(SimpleNewObj)  # avoid __init__
        x.abc = 666
        for proto in protocols:
            s = self.dumps(x, proto)
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s), proto >= 2)
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assertEqual(y.abc, 666)
            self.assertEqual(x.__dict__, y.__dict__)
    def test_newobj_list_slots(self):
        """A list subclass mixing __slots__ ("foo") and an instance
        __dict__ ("bar") must round-trip both under protocol 2."""
        x = SlotList([1, 2, 3])
        x.foo = 42
        x.bar = "hello"
        s = self.dumps(x, 2)
        y = self.loads(s)
        self.assertEqual(list(x), list(y))
        self.assertEqual(x.__dict__, y.__dict__)
        self.assertEqual(x.foo, y.foo)
        self.assertEqual(x.bar, y.bar)
    def test_reduce_overrides_default_reduce_ex(self):
        """A user-defined __reduce__ must be invoked (via the inherited
        __reduce_ex__) when pickling, but not when unpickling."""
        for proto in protocols:
            x = REX_one()
            self.assertEqual(x._reduce_called, 0)
            s = self.dumps(x, proto)
            self.assertEqual(x._reduce_called, 1)
            y = self.loads(s)
            # Unpickling rebuilds via REX_one() and never calls __reduce__.
            self.assertEqual(y._reduce_called, 0)
    def test_reduce_ex_called(self):
        """__reduce_ex__ must be called with the active protocol number."""
        for proto in protocols:
            x = REX_two()
            self.assertEqual(x._proto, None)
            s = self.dumps(x, proto)
            self.assertEqual(x._proto, proto)
            y = self.loads(s)
            # The fresh instance has the class default, proving the loader
            # did not re-run __reduce_ex__.
            self.assertEqual(y._proto, None)
    def test_reduce_ex_overrides_reduce(self):
        """When both are defined, __reduce_ex__ wins and __reduce__ must
        never be called (REX_three.__reduce__ raises TestFailed)."""
        for proto in protocols:
            x = REX_three()
            self.assertEqual(x._proto, None)
            s = self.dumps(x, proto)
            self.assertEqual(x._proto, proto)
            y = self.loads(s)
            self.assertEqual(y._proto, None)
    def test_reduce_ex_calls_base(self):
        """A __reduce_ex__ that delegates to object.__reduce_ex__ must
        still pickle correctly and record the protocol."""
        for proto in protocols:
            x = REX_four()
            self.assertEqual(x._proto, None)
            s = self.dumps(x, proto)
            self.assertEqual(x._proto, proto)
            y = self.loads(s)
            # REX_four pickles its __dict__, so _proto survives the trip.
            self.assertEqual(y._proto, proto)
    def test_reduce_calls_base(self):
        """A __reduce__ that delegates to object.__reduce__ must not
        recurse infinitely (this used to)."""
        for proto in protocols:
            x = REX_five()
            self.assertEqual(x._reduce_called, 0)
            s = self.dumps(x, proto)
            self.assertEqual(x._reduce_called, 1)
            y = self.loads(s)
            # _reduce_called is part of the pickled state here, so the
            # reloaded copy carries the flag too.
            self.assertEqual(y._reduce_called, 1)
    def test_reduce_bad_iterator(self):
        """Issue4176 regression: a non-iterator 4th/5th __reduce__ item
        must raise a catchable error, not crash the interpreter."""
        # Issue4176: crash when 4th and 5th items of __reduce__()
        # are not iterators
        class C(object):
            def __reduce__(self):
                # 4th item is not an iterator
                return list, (), None, [], None
        class D(object):
            def __reduce__(self):
                # 5th item is not an iterator
                return dict, (), None, None, []
        # Protocol 0 is less strict and also accept iterables.
        for proto in protocols:
            try:
                self.dumps(C(), proto)
            except (AttributeError, pickle.PickleError, cPickle.PickleError):
                pass
            try:
                self.dumps(D(), proto)
            except (AttributeError, pickle.PickleError, cPickle.PickleError):
                pass
    def test_many_puts_and_gets(self):
        """Stress the memo (PUT/GET opcodes): three references to the same
        large dict must survive a round trip under every protocol."""
        # Test that internal data structures correctly deal with lots of
        # puts/gets.
        keys = ("aaa" + str(i) for i in xrange(100))
        large_dict = dict((k, [4, 5, 6]) for k in keys)
        obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
        for proto in protocols:
            dumped = self.dumps(obj, proto)
            loaded = self.loads(dumped)
            self.assertEqual(loaded, obj,
                             "Failed protocol %d: %r != %r"
                             % (proto, obj, loaded))
    def test_attribute_name_interning(self):
        """Attribute names must come back interned after unpickling, i.e.
        identical (``is``) to the originals, not merely equal."""
        # Test that attribute names of pickled objects are interned when
        # unpickling.
        for proto in protocols:
            x = C()
            x.foo = 42
            x.bar = "hello"
            s = self.dumps(x, proto)
            y = self.loads(s)
            x_keys = sorted(x.__dict__)
            y_keys = sorted(y.__dict__)
            for x_key, y_key in zip(x_keys, y_keys):
                self.assertIs(x_key, y_key)
# Backport: unittest before Python 2.7 lacks assertIs/assertIn/assertNotIn,
# so define minimal equivalents and graft them onto AbstractPickleTests.
if sys.version_info < (2, 7):
    def assertIs(self, expr1, expr2, msg=None):
        self.assertTrue(expr1 is expr2, msg)
    def assertIn(self, expr1, expr2, msg=None):
        self.assertTrue(expr1 in expr2, msg)
    def assertNotIn(self, expr1, expr2, msg=None):
        self.assertTrue(expr1 not in expr2, msg)
    AbstractPickleTests.assertIs = assertIs
    AbstractPickleTests.assertIn = assertIn
    AbstractPickleTests.assertNotIn = assertNotIn
# Test classes for reduce_ex
# Each REX_* class records whether/how its reduce hook was invoked so the
# tests above can assert on the pickling machinery's dispatch order.
class REX_one(object):
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return REX_one, ()
    # No __reduce_ex__ here, but inheriting it from object
class REX_two(object):
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
    # No __reduce__ here, but inheriting it from object
class REX_three(object):
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        # Intentionally rebuilds as REX_two; the test only checks that
        # __reduce__ below is never reached.
        return REX_two, ()
    def __reduce__(self):
        raise TestFailed, "This __reduce__ shouldn't be called"
class REX_four(object):
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return object.__reduce_ex__(self, proto)
    # Calling base class method should succeed
class REX_five(object):
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return object.__reduce__(self)
    # This one used to fail with infinite recursion
# Test classes for newobj
# Trivial subclasses of every builtin, each with a class attribute, used to
# exercise the protocol-2 NEWOBJ path for builtin subtypes.
class MyInt(int):
    sample = 1
class MyLong(long):
    sample = 1L
class MyFloat(float):
    sample = 1.0
class MyComplex(complex):
    sample = 1.0 + 0.0j
class MyStr(str):
    sample = "hello"
class MyUnicode(unicode):
    sample = u"hello \u1234"
class MyTuple(tuple):
    sample = (1, 2, 3)
class MyList(list):
    sample = [1, 2, 3]
class MyDict(dict):
    sample = {"a": 1, "b": 2}
# Convenience collection iterated by the round-trip tests.
myclasses = [MyInt, MyLong, MyFloat,
             MyComplex,
             MyStr, MyUnicode,
             MyTuple, MyList, MyDict]
# List subclass that carries both a slot and (via MyList) a __dict__.
class SlotList(MyList):
    __slots__ = ["foo"]
# __init__ raises so tests can prove unpickling bypasses it (NEWOBJ path).
class SimpleNewObj(object):
    def __init__(self, a, b, c):
        # raise an error, to make sure this isn't called
        raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = cStringIO.StringIO()
data = [123, {}, 124]
self.module.dump(data, stream)
stream.seek(0)
unpickled = self.module.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(self.module.HIGHEST_PROTOCOL, 3)
def test_callapi(self):
f = cStringIO.StringIO()
# With and without keyword arguments
self.module.dump(123, f, -1)
self.module.dump(123, file=f, protocol=-1)
self.module.dumps(123, -1)
self.module.dumps(123, protocol=-1)
self.module.Pickler(f, -1)
self.module.Pickler(f, protocol=-1)
def test_incomplete_input(self):
s = StringIO.StringIO("X''.")
self.assertRaises(EOFError, self.module.load, s)
@skipIf(_is_pypy, "Fails to access the redefined builtins")
def test_restricted(self):
# issue7128: cPickle failed in restricted mode
builtins = {'pickleme': self.module,
'__import__': __import__}
d = {}
teststr = "def f(): pickleme.dumps(0)"
exec teststr in {'__builtins__': builtins}, d
d['f']()
def test_bad_input(self):
# Test issue4298
s = '\x58\0\0\0\x54'
self.assertRaises(EOFError, self.module.loads, s)
# Test issue7455
s = '0'
# XXX Why doesn't pickle raise UnpicklingError?
self.assertRaises((IndexError, cPickle.UnpicklingError),
self.module.loads, s)
class AbstractPersistentPicklerTests(unittest.TestCase):
    """Exercise the persistent-id hooks.

    Every even integer is routed through persistent_id/persistent_load;
    the counters verify each hook fired once per even element.
    """
    # This class defines persistent_id() and persistent_load()
    # functions that should be used by the pickler.  All even integers
    # are pickled using persistent ids.
    def persistent_id(self, object):
        # Return a persistent id (string) for even ints, None otherwise
        # so odd ints are pickled normally.
        if isinstance(object, int) and object % 2 == 0:
            self.id_count += 1
            return str(object)
        else:
            return None
    def persistent_load(self, oid):
        # Inverse of persistent_id: decode the string id back to the int.
        self.load_count += 1
        object = int(oid)
        assert object % 2 == 0
        return object
    def test_persistence(self):
        """Default protocol: 5 of range(10) go through the persistent path."""
        self.id_count = 0
        self.load_count = 0
        L = range(10)
        self.assertEqual(self.loads(self.dumps(L)), L)
        self.assertEqual(self.id_count, 5)
        self.assertEqual(self.load_count, 5)
    def test_bin_persistence(self):
        """Same check using binary protocol 1."""
        self.id_count = 0
        self.load_count = 0
        L = range(10)
        self.assertEqual(self.loads(self.dumps(L, 1)), L)
        self.assertEqual(self.id_count, 5)
        self.assertEqual(self.load_count, 5)
# Fixtures for the noload() tests below.
REDUCE_A = 'reduce_A'
class AAA(object):
    # Reduces to a plain string instead of an AAA instance.
    def __reduce__(self):
        return str, (REDUCE_A,)
class BBB(object):
    # Ordinary object, pickled through the default mechanism.
    pass
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
    """Tests that drive Pickler/Unpickler objects directly: memo
    management, object reuse, and the cPickle-style noload() API.

    Subclasses must set pickler_class and unpickler_class.
    """
    pickler_class = None
    unpickler_class = None
    def setUp(self):
        assert self.pickler_class
        assert self.unpickler_class
    def test_clear_pickler_memo(self):
        # To test whether clear_memo() has any effect, we pickle an object,
        # then pickle it again without clearing the memo; the two serialized
        # forms should be different. If we clear_memo() and then pickle the
        # object again, the third serialized form should be identical to the
        # first one we obtained.
        data = ["abcdefg", "abcdefg", 44]
        f = cStringIO.StringIO()
        pickler = self.pickler_class(f)
        pickler.dump(data)
        first_pickled = f.getvalue()
        # Reset StringIO object.
        f.seek(0)
        f.truncate()
        pickler.dump(data)
        second_pickled = f.getvalue()
        # Reset the Pickler and StringIO objects.
        pickler.clear_memo()
        f.seek(0)
        f.truncate()
        pickler.dump(data)
        third_pickled = f.getvalue()
        self.assertNotEqual(first_pickled, second_pickled)
        self.assertEqual(first_pickled, third_pickled)
    def test_priming_pickler_memo(self):
        # Verify that we can set the Pickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = cStringIO.StringIO()
        pickler = self.pickler_class(f)
        pickler.dump(data)
        first_pickled = f.getvalue()
        f = cStringIO.StringIO()
        primed = self.pickler_class(f)
        # A primed pickler emits memo references instead of full values,
        # so its output must differ from the cold pickler's.
        primed.memo = pickler.memo
        primed.dump(data)
        primed_pickled = f.getvalue()
        self.assertNotEqual(first_pickled, primed_pickled)
    def test_priming_unpickler_memo(self):
        # Verify that we can set the Unpickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = cStringIO.StringIO()
        pickler = self.pickler_class(f)
        pickler.dump(data)
        first_pickled = f.getvalue()
        f = cStringIO.StringIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo
        primed.dump(data)
        primed_pickled = f.getvalue()
        unpickler = self.unpickler_class(cStringIO.StringIO(first_pickled))
        unpickled_data1 = unpickler.load()
        self.assertEqual(unpickled_data1, data)
        # The primed unpickler resolves memo references against the first
        # load's memo, so it must hand back the *same* object.
        primed = self.unpickler_class(cStringIO.StringIO(primed_pickled))
        primed.memo = unpickler.memo
        unpickled_data2 = primed.load()
        primed.memo.clear()
        self.assertEqual(unpickled_data2, data)
        self.assertTrue(unpickled_data2 is unpickled_data1)
    def test_reusing_unpickler_objects(self):
        # One Unpickler instance must be reusable across rewinds of its
        # underlying stream.
        data1 = ["abcdefg", "abcdefg", 44]
        f = cStringIO.StringIO()
        pickler = self.pickler_class(f)
        pickler.dump(data1)
        pickled1 = f.getvalue()
        data2 = ["abcdefg", 44, 44]
        f = cStringIO.StringIO()
        pickler = self.pickler_class(f)
        pickler.dump(data2)
        pickled2 = f.getvalue()
        f = cStringIO.StringIO()
        f.write(pickled1)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        self.assertEqual(unpickler.load(), data1)
        f.seek(0)
        f.truncate()
        f.write(pickled2)
        f.seek(0)
        self.assertEqual(unpickler.load(), data2)
    def test_noload_object(self):
        # noload() must advance the stream exactly one pickled object at a
        # time (checked via f.tell() against the recorded offsets) while
        # returning None for custom-class objects and real values for
        # builtins.
        global _NOLOAD_OBJECT
        after = {}
        _NOLOAD_OBJECT = object()
        aaa = AAA()
        bbb = BBB()
        ccc = 1
        ddd = 1.0
        eee = ('eee', 1)
        fff = ['fff']
        ggg = {'ggg': 0}
        unpickler = self.unpickler_class
        f = io.BytesIO()
        pickler = self.pickler_class(f, protocol=2)
        pickler.dump(_NOLOAD_OBJECT)
        after['_NOLOAD_OBJECT'] = f.tell()
        pickler.dump(aaa)
        after['aaa'] = f.tell()
        pickler.dump(bbb)
        after['bbb'] = f.tell()
        pickler.dump(ccc)
        after['ccc'] = f.tell()
        pickler.dump(ddd)
        after['ddd'] = f.tell()
        pickler.dump(eee)
        after['eee'] = f.tell()
        pickler.dump(fff)
        after['fff'] = f.tell()
        pickler.dump(ggg)
        after['ggg'] = f.tell()
        f.seek(0)
        unpickler = self.unpickler_class(f)
        unpickler.noload() # read past _NOLOAD_OBJECT
        self.assertEqual(f.tell(), after['_NOLOAD_OBJECT'])
        noload = unpickler.noload() # read past aaa
        self.assertEqual(noload, None)
        self.assertEqual(f.tell(), after['aaa'])
        unpickler.noload() # read past bbb
        self.assertEqual(f.tell(), after['bbb'])
        noload = unpickler.noload() # read past ccc
        self.assertEqual(noload, ccc)
        self.assertEqual(f.tell(), after['ccc'])
        noload = unpickler.noload() # read past ddd
        self.assertEqual(noload, ddd)
        self.assertEqual(f.tell(), after['ddd'])
        noload = unpickler.noload() # read past eee
        self.assertEqual(noload, eee)
        self.assertEqual(f.tell(), after['eee'])
        noload = unpickler.noload() # read past fff
        self.assertEqual(noload, fff)
        self.assertEqual(f.tell(), after['fff'])
        noload = unpickler.noload() # read past ggg
        self.assertEqual(noload, ggg)
        self.assertEqual(f.tell(), after['ggg'])
    def test_functional_noload_dict_subclass(self):
        """noload() doesn't break or produce any output given a dict subclass"""
        # See http://bugs.python.org/issue1101399
        o = MyDict()
        o['x'] = 1
        f = io.BytesIO()
        pickler = self.pickler_class(f, protocol=2)
        pickler.dump(o)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        noload = unpickler.noload()
        self.assertEqual(noload, None)
    def test_functional_noload_list_subclass(self):
        """noload() doesn't break or produce any output given a list subclass"""
        # See http://bugs.python.org/issue1101399
        o = MyList()
        o.append(1)
        f = io.BytesIO()
        pickler = self.pickler_class(f, protocol=2)
        pickler.dump(o)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        noload = unpickler.noload()
        self.assertEqual(noload, None)
    def test_functional_noload_dict(self):
        """noload() implements the Python 2.6 behaviour and fills in dicts"""
        # See http://bugs.python.org/issue1101399
        o = dict()
        o['x'] = 1
        f = io.BytesIO()
        pickler = self.pickler_class(f, protocol=2)
        pickler.dump(o)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        noload = unpickler.noload()
        self.assertEqual(noload, o)
    def test_functional_noload_list(self):
        """noload() implements the Python 2.6 behaviour and fills in lists"""
        # See http://bugs.python.org/issue1101399
        o = list()
        o.append(1)
        f = io.BytesIO()
        pickler = self.pickler_class(f, protocol=2)
        pickler.dump(o)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        noload = unpickler.noload()
        self.assertEqual(noload, o)
class BigmemPickleTests(unittest.TestCase):
    """Round-trip a >2GB list of strings; only runs when the bigmem test
    harness grants enough memory."""
    # Memory requirements: 1 byte per character for input strings, 1 byte
    # for pickled data, 1 byte for unpickled strings, 1 byte for internal
    # buffer and 1 byte of free space for resizing of internal buffer.
    @precisionbigmemtest(size=_2G + 100*_1M, memuse=5)
    def test_huge_strlist(self, size):
        chunksize = 2**20
        data = []
        # Growing chunk sizes make the chunks distinct, defeating memo
        # deduplication so the pickle really is huge.
        while size > chunksize:
            data.append('x' * chunksize)
            size -= chunksize
            chunksize += 1
        data.append('y' * size)
        try:
            for proto in protocols:
                try:
                    pickled = self.dumps(data, proto)
                    res = self.loads(pickled)
                    self.assertEqual(res, data)
                finally:
                    # Drop the big intermediates eagerly to keep peak RSS down.
                    res = None
                    pickled = None
        finally:
            data = None
| gpl-2.0 |
luismagr/info_poblaciones | csv2sql.py | 1 | 4435 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import os
# Directory (sibling of this script) where the generated .sql files land.
GENERATE_SQL_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "generated_sql")
def volcar_a_fichero(sql_strings, file_name):
    """Append every string in *sql_strings* to GENERATE_SQL_DIR/file_name.

    sql_strings -- iterable of already-terminated SQL lines
    file_name   -- file name relative to GENERATE_SQL_DIR
    """
    abs_file_name = os.path.join(GENERATE_SQL_DIR, file_name)
    print("Volcando a %s..." % abs_file_name)
    # "with" guarantees the handle is closed even if a write fails
    # (the old open()/close() pair leaked it on error); writelines
    # replaces the manual loop.
    with open(abs_file_name, "a") as inserts:
        inserts.writelines(sql_strings)
def set_database(database_name, file_name):
    """Truncate GENERATE_SQL_DIR/file_name and write the leading
    '<database_name>;' line (callers pass e.g. "use infodb").
    """
    abs_file_name = os.path.join(GENERATE_SQL_DIR, file_name)
    # "with" closes the file even if write() raises; the previous
    # open()/close() pair leaked the handle on error.
    with open(abs_file_name, "w") as uses:
        uses.write("%s;\n" % database_name)
def main():
    """Entry point: parse the input CSVs and emit one SQL script per table
    (countries, provinces, towns, postal codes), each prefixed with a
    'use infodb' statement."""
    dic_countries, dic_provinces, dic_towns, dic_cps = parse_fichero_csv()
    inserts_countries = create_sql_countries(dic_countries)
    inserts_provinces = create_sql_provinces(dic_provinces)
    inserts_towns = create_sql_towns(dic_towns)
    inserts_cps = create_sql_cps(dic_cps)
    # set_database truncates each file; volcar_a_fichero then appends.
    set_database("use infodb", "countries.sql")
    volcar_a_fichero(inserts_countries, "countries.sql")
    set_database("use infodb", "provinces.sql")
    volcar_a_fichero(inserts_provinces, "provinces.sql")
    set_database("use infodb", "towns.sql")
    volcar_a_fichero(inserts_towns, "towns.sql")
    set_database("use infodb", "cps.sql")
    volcar_a_fichero(inserts_cps, "cps.sql")
def parse_fichero_csv():
    """Parse CSV/country.csv and CSV/postal_codes.csv into lookup dicts.

    Returns (countries, provinces, towns, cps):
      countries -- {country name: numeric id}
      provinces -- {province name: {"id": id, "country": country id}}
      towns     -- {town name: {"id": id, "province": province id}}
      cps       -- {postal code: {"id": id, "town": town id}}
    Numeric ids are synthetic, assigned in order of first appearance.
    All provinces are attached to "España"'s country id.
    """
    # Read the first csv with the contries
    csvfile = open(os.path.join("CSV" , "country.csv"), "r")
    reader = csv.reader(csvfile)
    countries = {}
    counter = 1
    for row in reader:
        # Column 1 holds the country name.
        countries[row[1]] = counter
        counter += 1
    csvfile.close()
    #Open the CSV file with all the information
    csvfile = open(os.path.join("CSV", "postal_codes.csv"), "r")
    reader = csv.reader(csvfile)
    #Create the dictionaries
    provinces = {}
    towns = {}
    cps = {}
    #Loop the file creating the dictionaries
    province_counter = 1
    towns_counter = 1
    cp_counter = 1
    for row in reader:
        # Row layout: [postal code, ?, town, province, ...] — only the
        # columns used below are known from this code.
        cp = row[0]
        town = row[2]
        province = row[3]
        if province not in provinces:
            provinces[province] = {"id": province_counter,
                                   "country": countries["España"]}
            province_counter += 1
        if town not in towns:
            towns[town] = {"id": towns_counter, "province": provinces[province]["id"]}
            towns_counter += 1
        cps[cp] = {"id": cp_counter, "town": towns[town]["id"]}
        cp_counter += 1
    csvfile.close()
    return countries, provinces, towns, cps
def create_sql_countries(dic_countries):
    """Return sorted INSERT statements for the Country table.

    dic_countries -- {country name: numeric id}
    """
    template = "INSERT INTO `Country` (id, name) VALUES({0:03d}, '{1}');\n"
    statements = [template.format(numeric_id, name)
                  for name, numeric_id in dic_countries.items()]
    statements.sort()
    return statements
def create_sql_provinces(dic_provinces):
    """Return sorted INSERT statements for the Province table.

    dic_provinces -- {province name: {"id": int, "country": int}}
    """
    template = ("INSERT INTO `Province` (id, name, country_id) "
                "VALUES({0:03d}, '{1}', {2:03d});\n")
    statements = [template.format(info["id"], name, info["country"])
                  for name, info in dic_provinces.items()]
    statements.sort()
    return statements
def create_sql_towns(dic_towns):
    """Return sorted INSERT statements for the Town table.

    dic_towns -- {town name: {"id": int, "province": int}}
    Town names are double-quoted because some contain apostrophes.
    """
    template = 'INSERT INTO `Town` (id, name, province_id) VALUES({0:03d}, "{1}", {2:03d});\n'
    statements = [template.format(info["id"], name, info["province"])
                  for name, info in dic_towns.items()]
    statements.sort()
    return statements
def create_sql_cps(dic_cps):
    """Return sorted INSERT statements for the CodPostal table.

    dic_cps -- {postal code: {"id": int, "town": int}}
    """
    template = "INSERT INTO `CodPostal` (id, codpostal, town) VALUES({0:03d}, '{1}', {2:03d});\n"
    statements = [template.format(info["id"], code, info["town"])
                  for code, info in dic_cps.items()]
    statements.sort()
    return statements
# Script entry point: run the CSV -> SQL conversion when executed directly.
if __name__ == "__main__":
    main()
| gpl-3.0 |
dreilly369/subterfuge | main/views.py | 22 | 17460 | import os
import sys
sys.path.append('/usr/share/subterfuge/utilities')
#Ignore Deprication Warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
#Django Web Modules
from django.template import Context, loader
from django.http import HttpResponse
from django.http import HttpRequest
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
from django.template import RequestContext
from django import forms
#Database Models
from subterfuge.main.models import *
from subterfuge.modules.models import *
#Additional Views
from subterfuge.cease.views import *
from subterfuge.modules.views import *
from subfunctions import statuscheck
@csrf_protect
@never_cache
def index(request):
    """Home page view.

    AJAX requests get only the refreshed credential-table fragment (and
    reset the per-host injection counters); full requests render the whole
    home page with the current attack setup.
    """
    if request.is_ajax():
        #Get Creds from DB
        creds = credentials.objects.all()
        #Reset Injection Counter
        iptrack.objects.update(injected = "0")
        #Check Arpspoof status
        status = statuscheck()
        #Relay Template Variables
        return render_to_response("includes/credtable.inc", {
            "credential" : creds,
            "status" : status,
        })
    else:
        #Check Attack status
        status = statuscheck()
        #Get Current Settings from DB
        settings = setup.objects.all()
        #Relay Template Variables
        return render_to_response("home.ext", {
            "status" : status,
            "setup" : settings,
        })
def notifications(request):
    """Notifications page view.

    AJAX requests get the notification-table fragment (credentials,
    attack status and all alerts); full requests render the page shell.
    """
    if request.is_ajax():
        #Get Creds from DB
        creds = credentials.objects.all()
        #Reset Injection Counter
        iptrack.objects.update(injected = "0")
        #Check Arpspoof status
        status = statuscheck()
        alerts = notification.objects.all()
        #Relay Template Variables
        return render_to_response("includes/notificationtable.inc", {
            "credential" : creds,
            "status" : status,
            "alerts" : alerts
        })
    else:
        #Check Attack status
        status = statuscheck()
        #Get Current Settings from DB
        settings = setup.objects.all()
        #Relay Template Variables
        return render_to_response("notifications.ext", {
            "status" : status,
            "setup" : settings
        })
def plugins(request):
    """Plugins page: list installed modules plus the raw config file.

    NOTE(review): the AJAX branch only prints and returns None, which
    Django treats as an error response — looks unfinished; confirm.
    """
    if request.is_ajax():
        print "AJAX REQUEST!"
    else:
        #Read in Config File
        # rstrip() removes trailing path characters (e.g. "main/") from
        # this module's directory to reach the install root — fragile hack.
        f = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r')
        config = f.readlines()
        modules = installed.objects.all()
        #Relay Template Variables
        return render_to_response("plugins.ext", {
            "config" : config,
            "modules" : modules,
        })
def hostcheck(request):
    """AJAX-only fragment: current hosts, scan results and fresh alerts.

    Alerts are marked "old" after being handed to the template so they are
    reported once. Non-AJAX requests fall through and return None.
    """
    if request.is_ajax():
        #Read in Config File
        f = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r')
        config = f.readlines()
        #Check ARP Poison status
        # Any surviving "arpmitm" process line means the poisoner is running.
        command = "ps -A 1 | sed -e '/arpmitm/!d;/sed -e/d;s/^ //;s/ pts.*//'"
        a = os.popen(command)
        reply = a.read()
        if(len(reply)>1):
            status = "on"
        else:
            status = "off"
        modules = installed.objects.all()
        # id=1 is excluded — presumably the attacker's own host record;
        # TODO confirm.
        client = iptrack.objects.exclude(id = "1").all()
        scanout = scan.objects.all()
        alerts = notification.objects.all()
        # Force evaluation of the lazy queryset before flagging rows "old".
        for data in alerts:
            pass
        notification.objects.update(status = "old")
        #Relay Template Variables
        return render_to_response("includes/hostcheck.inc", {
            "config" : config,
            "modules" : modules,
            "client" : client,
            "scan" : scanout,
            "status" : status,
            "alerts" : alerts
        })
def netview(request):
    """Network-view page: hosts, scans, modules and ARP-poison status.

    The AJAX branch renders only the table fragment; the full branch
    repeats the same gathering (duplicated code) and adds the setup rows.
    """
    if request.is_ajax():
        #Read in Config File
        f = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r')
        config = f.readlines()
        #Check ARP Poison status
        command = "ps -A 1 | sed -e '/arpmitm/!d;/sed -e/d;s/^ //;s/ pts.*//'"
        a = os.popen(command)
        reply = a.read()
        if(len(reply)>1):
            status = "on"
        else:
            status = "off"
        modules = installed.objects.all()
        client = iptrack.objects.exclude(id = "1").all()
        scanout = scan.objects.all()
        #Relay Template Variables
        return render_to_response("includes/netview.inc", {
            "config" : config,
            "modules" : modules,
            "client" : client,
            "scan" : scanout,
            "status" : status,
        })
    else:
        #Read in Config File
        f = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r')
        config = f.readlines()
        #Check ARP Poison status
        command = "ps -A 1 | sed -e '/arpmitm/!d;/sed -e/d;s/^ //;s/ pts.*//'"
        a = os.popen(command)
        reply = a.read()
        if(len(reply)>1):
            status = "on"
        else:
            status = "off"
        modules = installed.objects.all()
        client = iptrack.objects.exclude(id = "1").all()
        scanout = scan.objects.all()
        #Get Current Settings from DB
        settings = setup.objects.all()
        #Relay Template Variables
        return render_to_response("netview.ext", {
            "config" : config,
            "modules" : modules,
            "client" : client,
            "scan" : scanout,
            "status" : status,
            "setup" : settings,
        })
def netctrl(request, cmd):
    """AJAX dispatcher for the network view.

    cmd -- "scan" (launch the background scan helper against POST target),
           "expand"/"shrink" (toggle a host row's expanded flag).
    Always re-renders the netview module fragment afterwards.
    """
    if request.is_ajax():
        if cmd == "scan":
            address = request.POST["target"]
            # Fire-and-forget: the scan helper runs in the background.
            os.system("python " + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + "utilities/scan.py " + address + " &")
        if cmd == "expand":
            iptrack.objects.filter(address = request.POST["address"]).update(expand = "1")
        if cmd == "shrink":
            iptrack.objects.filter(address = request.POST["address"]).update(expand = "0")
        #Read in Config File
        f = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r')
        config = f.readlines()
        modules = installed.objects.all()
        client = iptrack.objects.exclude(id = "1").all()
        scanout = scan.objects.all()
        #Relay Template Variables
        return render_to_response("mods/netview.mod", {
            "config" : config,
            "modules" : modules,
            "client" : client,
            "scan" : scanout,
        })
    else:
        # Fallback for non-AJAX hits; the template only gets a placeholder.
        #Relay Template Variables
        return render_to_response("netview.ext", {
            "config" : "um",
        })
#Writes to the Config File are handled here
def conf(request, module):
    """Central configuration handler.

    Reads subterfuge.conf into memory, applies the changes selected by
    *module* ("settings", "update", "exportcreds", "httpinjection",
    "tunnelblock", or an installed plugin name), writes the file back and
    re-renders the page the request came from.

    The many bare try/except blocks deliberately skip POST fields that
    were not submitted on this particular form.
    """
    # Read in subterfuge.conf
    with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r') as file:
        conf = file.readlines()
    # Subterfuge Settings Configuration
    # Edit subterfuge.conf
    if module == "settings":
        #Attack Setup
        # Fixed line numbers in conf[] (15=iface, 17=gateway, 20=autoconf,
        # 26=local ip) mirror the layout of subterfuge.conf.
        try:
            setup.objects.update(iface = request.POST["iface"])
            conf[15] = request.POST["iface"] + "\n"
            print "Using Interface => " + request.POST["iface"]
        except:
            pass
        try:
            if request.POST["auto"] == "true":
                setup.objects.update(autoconf = "yes")
                conf[20] = "yes" + "\n"
                print "Auto Configure => yes"
            else:
                setup.objects.update(autoconf = "no")
                conf[20] = "no" + "\n"
                print "Auto Configure => no"
        except:
            pass
        try:
            setup.objects.update(gateway = request.POST["agw"])
            conf[17] = request.POST["agw"] + "\n"
            print "Using Gateway => " + request.POST["agw"]
        except:
            pass
        try:
            setup.objects.update(gateway = request.POST["mgw"])
            conf[17] = request.POST["mgw"] + "\n"
            print "Using Gateway => " + request.POST["mgw"]
        except:
            pass
        #Get the Local IP Address
        try:
            f = os.popen("ifconfig " + request.POST["iface"] + " | grep \"inet addr\" | sed -e \'s/.*addr://;s/ .*//\'")
            temp2 = ''
            temp3 = ''
            temp = f.readline().rstrip('\n')
            ipaddress = re.findall(r'\d*.\d*.\d*.\d*', temp)[0]
            conf[26] = ipaddress + "\n"
            setup.objects.update(ip = ipaddress)
        except:
            pass
        #Configuration
        try:
            setup.objects.update(ploadrate = request.POST["ploadrate"])
            setup.objects.update(injectrate = request.POST["injectrate"])
            if request.POST["smartarp"] == "true":
                setup.objects.update(smartarp = "yes")
            elif request.POST["smartarp"] == "false":
                setup.objects.update(smartarp = "no")
            setup.objects.update(arprate = request.POST["arprate"])
        except:
            pass
        #Vectors
        try:
            if request.POST["active"] == "true":
                vectors.objects.filter(name = request.POST["vector"]).update(active = "yes")
            else:
                vectors.objects.filter(name = request.POST["vector"]).update(active = "no")
            #Wireless AP Generator Settings
            if request.POST["vector"] == "Wireless AP Generator":
                apgen.objects.update(essid = request.POST["essid"])
                apgen.objects.update(channel = request.POST["channel"])
                apgen.objects.update(atknic = request.POST["atknic"])
                apgen.objects.update(netnic = request.POST["netnic"])
        except:
            pass
        #Advanced
        try:
            scanip = request.POST["scantargetip"]
            print "Importing Nmap scan for: " + scanip
            #Get/Write Files
            if request.FILES['scanresults']:
                scanresults = request.FILES['scanresults']
                dest = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'utilities/scans/' + scanip + '.xml', 'wb+')
                for chunk in scanresults.chunks():
                    dest.write(chunk)
                dest.close()
            #Execute Scan
            os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'utilities/scan.py ' + scanip)
            #Relay Template Variables
            # NOTE(review): config/result/gw/status/currentsetup are not
            # defined in this scope, so this return always raises NameError,
            # which the bare except below swallows — dead code; confirm.
            return render_to_response("settings.ext", {
                "config" : config,
                "conf" : str(config[20]).rstrip('\n'),
                "iface" : result,
                "gateway" : gw,
                "status" : status,
                "setup" : currentsetup,
            })
        except:
            pass
    if module == "update":
        os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'update.py')
    if module == "exportcreds":
        os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'exportcreds.py')
    #################################
    #Subterfuge Module Configurations
    #################################
    if module == "httpinjection":
        httpcodeinjection(request, conf)
    elif module == "tunnelblock":
        tunnelblock()
    else:
        # Any other value is treated as an installed plugin's name and its
        # script is launched in the background.
        for mod in installed.objects.all():
            if module == mod.name:
                os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'modules/' + module + '/' + module + '.py &')
    #################################
    # END MODULE CONFIGURATION
    #################################
    # Write to subterfuge.conf
    with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'w') as file:
        file.writelines(conf)
    # Call Index Page
    # Check Arpspoof status
    command = "ps -A 1 | sed -e '/arpmitm/!d;/sed -e/d;s/^ //;s/ pts.*//'"
    a = os.popen(command)
    reply = a.read()
    if(len(reply)>1):
        status = "on"
    else:
        status = "off"
    if module == "httpinjection" or module == "tunnelblock":
        #Relay Template Variables
        modules = installed.objects.all()
        return render_to_response("plugins.ext", {
            "modules" : modules,
        })
    else:
        # Re-render whatever page the form was submitted from, derived
        # from the referer path segment.
        #Relay Template Variables
        return render_to_response(request.META['HTTP_REFERER'].split('/')[3] + ".ext", {
            "status" : status,
        })
def settings(request):
    """Settings page view.

    Enumerates network interfaces and candidate gateways (via shell
    commands), reads the config file, checks the ARP-poison process and
    renders settings.ext. The AJAX branch is unimplemented (prints only).
    """
    if request.is_ajax():
        print "AJAX REQUEST!"
    else:
        #Get Interfaces
        # Interface names come from /sys/class/net; "lo" is filtered out.
        f = os.popen("ls /sys/class/net/")
        temp = ''
        temp = f.readline().rstrip('\n')
        result = []
        result.append(temp)
        while (temp != ''):
            temp = f.readline().rstrip('\n')
            if (temp != 'lo'):
                result.append(temp)
        result.remove('')
        #Get Gateway
        # First try the routing table's default gateway, then fall back to
        # guessing "<iface subnet>.1" for each interface.
        gw = []
        e = os.popen("route -n | grep 'UG[ \t]' | awk '{print $2}'")
        ttemp = ''
        ttemp = e.readline().rstrip('\n')
        if not ttemp:
            print 'No default gateway present'
        else:
            gw.append(ttemp)
        temp = ''
        gw.append(temp)
        for interface in result:
            f = os.popen("ifconfig " + interface + " | grep \"inet addr\" | sed -e \'s/.*addr://;s/ .*//\'")
            temp2 = ''
            temp3 = ''
            try:
                temp = f.readline().rstrip('\n')
                temp2 = re.findall(r'\d*.\d*.\d*.', temp)
            except:
                "No default gw on " + interface
            try:
                if not temp2:
                    print "No default gw on " + interface
                else:
                    gate = temp2[0] + '1'
                    gw.append(gate)
                    gw.remove('')
                    gw.reverse()
            except:
                print "Something went wrong when determining network gateway information"
                os.system("python /usr/share/subterfuge/utilities/notification.py 'Gateway Error' 'Subterfuge was unable to detect a default gw on any of your interfaces. Sorry.'")
        #Read in Config File
        f = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + 'subterfuge.conf', 'r')
        config = f.readlines()
        #Check Arpspoof status
        command = "ps -A 1 | sed -e '/arpmitm/!d;/sed -e/d;s/^ //;s/ pts.*//'"
        a = os.popen(command)
        reply = a.read()
        if(len(reply)>1):
            status = "on"
        else:
            status = "off"
        currentsetup = setup.objects.all()
        availablevectors = vectors.objects.all()
        #Relay Template Variables
        return render_to_response("settings.ext", {
            "config" : config,
            "conf" : str(config[20]).rstrip('\n'),
            "iface" : result,
            "gateway" : gw,
            "status" : status,
            "setup" : currentsetup,
            "vectors" : availablevectors,
        })
#Command Definitions:
def startpwn(request, method):
    """AJAX-only: launch the attack controller in the background with the
    chosen *method* (passed straight through to attackctrl.py)."""
    if request.is_ajax():
        os.system("python " + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + "attackctrl.py " + method +" &")
    else:
        print "Nope... Chuck Testa!"
def stoppwn(request):
    """AJAX-only: stop the running attack via cease() (from cease.views)."""
    if request.is_ajax():
        print "Ceasing Pwn Ops..."
        cease()
    else:
        print "Nope... Chuck Testa!"
def resetpwn(request):
    """AJAX-only: reset the harvest database by restoring the pristine
    SQLite file (base_db) over the live one (db)."""
    if request.is_ajax():
        print "Resetting Pwn DB..."
        #For MySQL
        #cmd = "mysql --force harvester -u root -ppass < /harvester/templates/flush.sql"
        cmd = "cp " + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + "/base_db " + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnnnopqrstruvwxyz") + "/db"
        os.system(cmd)
    else:
        print "Nope... Chuck Testa!"
def gate(request):
    """AJAX-only fragment: guess the default gateway as "<subnet>.1".

    NOTE(review): `interface` is never defined in this scope, so the
    os.popen line raises NameError as written — looks broken; confirm.
    """
    if request.is_ajax():
        print "Loading Default Gateway"
        f = os.popen("ifconfig " + interface + " | grep \"inet addr\" | sed -e \'s/.*addr://;s/ .*//\'")
        temp = ''
        temp2 = ''
        temp3 = ''
        temp = f.readline().rstrip('\n')
        temp2 = re.findall(r'\d*.\d*.\d*.', temp)
        temp3 = temp2[0]
        # Assume the gateway is host .1 on the interface's subnet.
        temp3 = temp3 + '1'
        #Relay Template Variables
        return render_to_response("includes/gateway.inc", {
            "gateway" : temp3,
        })
    else:
        print "Nope... Chuck Testa!"
| gpl-3.0 |
jezdez-archive/queues | queues/backends/redisd.py | 1 | 2715 | """
Backend for redis.
Requires redis.py from the redis source (found in client-libraries/python).
"""
from queues.backends.base import BaseQueue
from queues import InvalidBackend, QueueException
import os
try:
import redis
except ImportError:
raise InvalidBackend("Unable to import redis.")
# Connection settings: prefer Django settings, fall back to environment
# variables. Import-time validation raises InvalidBackend on bad config.
CONN = DB = None
try:
    from django.conf import settings
    CONN = getattr(settings, 'QUEUE_REDIS_CONNECTION', None)
    DB = getattr(settings, 'QUEUE_REDIS_DB', None)
    TIMEOUT = getattr(settings, 'QUEUE_REDIS_TIMEOUT', None)
except:
    CONN = os.environ.get('QUEUE_REDIS_CONNECTION', None)
    DB = os.environ.get('QUEUE_REDIS_DB', None)
    TIMEOUT = os.environ.get('QUEUE_REDIS_TIMEOUT', None)
if not CONN:
    raise InvalidBackend("QUEUE_REDIS_CONNECTION not set.")
try:
    host, port = CONN.split(':')
except ValueError:
    raise InvalidBackend("QUEUE_REDIS_CONNECTION should be in the format host:port (such as localhost:6379).")
try:
    port = int(port)
except ValueError:
    raise InvalidBackend("Port portion of QUEUE_REDIS_CONNECTION should be an integer.")
def _get_connection(host=host, port=port, db=DB, timeout=TIMEOUT):
    """Build a ``redis.Redis`` client from the module-level settings.

    ``db`` and ``timeout`` are forwarded only when they are set, so the
    redis client's own defaults apply otherwise.
    """
    options = {'host': host, 'port': port}
    for key, value in (('db', db), ('timeout', timeout)):
        if value:
            options[key] = value
    return redis.Redis(**options)
class Queue(BaseQueue):
def __init__(self, name):
try:
self.name = name
self.backend = 'redis'
self._connection = _get_connection()
except redis.RedisError, e:
raise QueueException, "%s" % e
def read(self):
try:
return self._connection.lpop(self.name)
except redis.RedisError, e:
raise QueueException, "%s" % e
def write(self, value):
try:
resp = self._connection.rpush(self.name, value)
if resp in ('OK', 1):
return True
else:
return False
except redis.RedisError, e:
raise QueueException, "%s" % e
def __len__(self):
try:
return self._connection.llen(self.name)
except redis.RedisError, e:
raise QueueException, "%s" % e
def __repr__(self):
return "<Queue %s>" % self.name
def create_queue():
    """No-op placeholder: redis creates list keys lazily on first push.

    Present only so this backend exposes the same API as the others
    (kept for swapability).
    """
    return True
def delete_queue(name):
"""Delete a queue"""
try:
resp = _get_connection().delete(name)
if resp and resp == 1:
return True
else:
return False
except redis.RedisError, e:
raise QueueException, "%s" % e
def get_list():
    """Return all keys in the configured redis database.

    NOTE(review): this lists *every* key, not only queue keys created by
    this backend -- verify callers expect that.
    """
    return _get_connection().keys('*')
| mit |
awduda/awduda.github.io | venv/lib/python2.7/site-packages/pip/commands/install.py | 323 | 17412 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir, get_installed_version
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
    """
    Install packages from:
    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.
    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    name = 'install'
    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""
    summary = 'Install packages.'

    def __init__(self, *args, **kw):
        """Declare all of ``pip install``'s command-line options."""
        super(InstallCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        # Shared options reused across pip commands.
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
                 'By default this will not replace existing files/folders in '
                 '<dir>. Use --upgrade to replace existing packages in <dir> '
                 'with new versions.'
        )
        cmd_opts.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='dir',
            default=None,
            help=("Download packages into <dir> instead of installing them, "
                  "regardless of what's already installed."),
        )
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
                 'version. The handling of dependencies depends on the '
                 'upgrade-strategy used.'
        )
        cmd_opts.add_option(
            '--upgrade-strategy',
            dest='upgrade_strategy',
            default='eager',
            choices=['only-if-needed', 'eager'],
            help='Determines how dependency upgrading should be handled. '
                 '"eager" - dependencies are upgraded regardless of '
                 'whether the currently installed version satisfies the '
                 'requirements of the upgraded package(s). '
                 '"only-if-needed" -  are upgraded only when they do not '
                 'satisfy the requirements of the upgraded package(s).'
        )
        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='When upgrading, reinstall all packages even if they are '
                 'already up-to-date.')
        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead).')
        cmd_opts.add_option(cmdoptions.ignore_requires_python())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.install_options())
        cmd_opts.add_option(cmdoptions.global_options())
        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help="Install to the Python user install directory for your "
                 "platform. Typically ~/.local/, or %APPDATA%\Python on "
                 "Windows. (See the Python documentation for site.USER_BASE "
                 "for full details.)")
        cmd_opts.add_option(
            '--egg',
            dest='as_egg',
            action='store_true',
            help="Install packages as eggs, not 'flat', like pip normally "
                 "does. This option is not about installing *from* eggs. "
                 "(WARNING: Because this option overrides pip's normal install"
                 " logic, requirements files may not behave as expected.)")
        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
                 "directory.")
        cmd_opts.add_option(
            '--prefix',
            dest='prefix_path',
            metavar='dir',
            default=None,
            help="Installation prefix where lib, bin and other top-level "
                 "folders are placed")
        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile py files to pyc",
        )
        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile py files to pyc",
        )
        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(cmdoptions.pre())
        cmd_opts.add_option(cmdoptions.no_clean())
        cmd_opts.add_option(cmdoptions.require_hashes())
        # Index options (e.g. --index-url) go first so they render above
        # the command options in --help output.
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def run(self, options, args):
        """Resolve options, build the requirement set, optionally build
        wheels, install (or just download), and finally handle --target
        relocation and temp-dir cleanup.  Returns the RequirementSet."""
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)
        # --- Deprecation warnings for flags kept only for compatibility.
        if options.as_egg:
            warnings.warn(
                "--egg has been deprecated and will be removed in the future. "
                "This flag is mutually exclusive with large parts of pip, and "
                "actually using it invalidates pip's ability to manage the "
                "installation process.",
                RemovedInPip10Warning,
            )
        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )
        if options.download_dir:
            warnings.warn(
                "pip install --download has been deprecated and will be "
                "removed in the future. Pip now has a download command that "
                "should be used instead.",
                RemovedInPip10Warning,
            )
            options.ignore_installed = True
        # --- Normalise paths and derive install_options.
        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)
        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if options.prefix_path:
                raise CommandError(
                    "Can not combine '--user' and '--prefix' as they imply "
                    "different installation locations"
                )
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')
        # --target installs go to a temp dir first, then get moved below.
        temp_target_dir = None
        if options.target_dir:
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)
        global_options = options.global_options or []
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, session)
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            if options.cache_dir and not check_path_owner(options.cache_dir):
                logger.warning(
                    "The directory '%s' or its parent directory is not owned "
                    "by the current user and caching wheels has been "
                    "disabled. check the permissions and owner of that "
                    "directory. If executing pip with sudo, you may want "
                    "sudo's -H flag.",
                    options.cache_dir,
                )
                options.cache_dir = None
            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    upgrade_strategy=options.upgrade_strategy,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_requires_python=options.ignore_requires_python,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache,
                    require_hashes=options.require_hashes,
                )
                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )
                if not requirement_set.has_requirements:
                    return
                try:
                    if (options.download_dir or not wheel or not
                            options.cache_dir):
                        # on -d don't do complex things like building
                        # wheels, and don't try to build wheels when wheel is
                        # not installed.
                        requirement_set.prepare_files(finder)
                    else:
                        # build wheels before install.
                        wb = WheelBuilder(
                            requirement_set,
                            finder,
                            build_options=[],
                            global_options=[],
                        )
                        # Ignore the result: a failed wheel will be
                        # installed from the sdist/vcs whatever.
                        wb.build(autobuilding=True)
                    if not options.download_dir:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                            prefix=options.prefix_path,
                        )
                        possible_lib_locations = get_lib_location_guesses(
                            user=options.use_user_site,
                            home=temp_target_dir,
                            root=options.root_path,
                            prefix=options.prefix_path,
                            isolated=options.isolated_mode,
                        )
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                # Best effort: tack the installed version
                                # onto the success message.
                                installed_version = get_installed_version(
                                    req.name, possible_lib_locations
                                )
                                if installed_version:
                                    item += '-' + installed_version
                            except Exception:
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    # Leave the build dir in place so the user can inspect it.
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()
        # --- For --target: move what was installed into the temp --home
        # layout over to the requested target directory.
        if options.target_dir:
            ensure_dir(options.target_dir)
            # Checking both purelib and platlib directories for installed
            # packages to be moved to target directory
            lib_dir_list = []
            purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']
            if os.path.exists(purelib_dir):
                lib_dir_list.append(purelib_dir)
            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
                lib_dir_list.append(platlib_dir)
            for lib_dir in lib_dir_list:
                for item in os.listdir(lib_dir):
                    target_item_dir = os.path.join(options.target_dir, item)
                    if os.path.exists(target_item_dir):
                        if not options.upgrade:
                            logger.warning(
                                'Target directory %s already exists. Specify '
                                '--upgrade to force replacement.',
                                target_item_dir
                            )
                            continue
                        if os.path.islink(target_item_dir):
                            logger.warning(
                                'Target directory %s already exists and is '
                                'a link. Pip will not automatically replace '
                                'links, please remove if replacement is '
                                'desired.',
                                target_item_dir
                            )
                            continue
                        if os.path.isdir(target_item_dir):
                            shutil.rmtree(target_item_dir)
                        else:
                            os.remove(target_item_dir)
                    shutil.move(
                        os.path.join(lib_dir, item),
                        target_item_dir
                    )
            shutil.rmtree(temp_target_dir)
        return requirement_set
def get_lib_location_guesses(*args, **kwargs):
    """Return the [purelib, platlib] directories for the distutils scheme
    described by *args*/*kwargs* (forwarded to ``distutils_scheme``)."""
    scheme = distutils_scheme('', *args, **kwargs)
    return [scheme[key] for key in ('purelib', 'platlib')]
| mit |
cpatrick/ITK-RemoteIO | Modules/ThirdParty/GDCM/src/gdcm/Wrapping/Csharp/doxy2swig.py | 18 | 15682 | #!/usr/bin/env python
"""Doxygen XML to SWIG docstring converter.
Usage:
doxy2swig.py [options] input.xml output.i
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
Code can be found at prabhu's page:
http://www.aero.iitb.ac.in/~prabhu/software/code/python/doxy2swig.py
Ref:
http://www.enricozini.org/2007/tips/swig-doxygen-docstring.html
http://internetducttape.com/2007/03/20/automatic_documentation_python_doxygen/
"""
######################################################################
#
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
#
# Thanks:
# Johan Hake: the include_function_definition feature
# Bill Spotz: bug reports and testing.
#
######################################################################
from xml.dom import minidom
import re
import textwrap
import sys
import types
import os.path
import optparse
def my_open_read(source):
    """Return *source* unchanged when it is already a readable file-like
    object; otherwise treat it as a filename and open it for reading."""
    if not hasattr(source, "read"):
        source = open(source)
    return source
def my_open_write(dest):
    """Return *dest* unchanged when it is already a writable file-like
    object; otherwise treat it as a filename and open it for writing."""
    if not hasattr(dest, "write"):
        dest = open(dest, 'w')
    return dest
class Doxy2SWIG:
    """Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x that have support for
    feature("docstring"). Once the data is parsed it is stored in
    self.pieces.
    """

    def __init__(self, src, include_function_definition=True, quiet=False):
        """Initialize the instance given a source object. `src` can
        be a file or filename. If you do not want to include function
        definitions from doxygen then set
        `include_function_definition` to `False`. This is handy since
        this allows you to use the swig generated function definition
        using %feature("autodoc", [0,1]).
        """
        f = my_open_read(src)
        self.my_dir = os.path.dirname(f.name)
        self.xmldoc = minidom.parse(f).documentElement
        f.close()
        self.pieces = []
        self.pieces.append('\n// File: %s\n'%\
                           os.path.basename(f.name))
        self.space_re = re.compile(r'\s+')
        # Matches a %feature line whose string starts with whitespace, so
        # the leading gap can be stripped in clean_pieces().
        self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
        self.multi = 0
        # Doxygen XML tags that carry no docstring-worthy content.
        self.ignores = ['inheritancegraph', 'param', 'listofallmembers',
                        'innerclass', 'name', 'declname', 'incdepgraph',
                        'invincdepgraph', 'programlisting', 'type',
                        'references', 'referencedby', 'location',
                        'collaborationgraph', 'reimplements',
                        'reimplementedby', 'derivedcompoundref',
                        'basecompoundref']
        #self.generics = []
        self.include_function_definition = include_function_definition
        if not include_function_definition:
            self.ignores.append('argsstring')
        self.quiet = quiet

    def generate(self):
        """Parses the file set in the initialization.  The resulting
        data is stored in `self.pieces`.
        """
        self.parse(self.xmldoc)

    def parse(self, node):
        """Parse a given node.  This function in turn calls the
        `parse_<nodeType>` functions which handle the respective
        nodes.
        """
        pm = getattr(self, "parse_%s"%node.__class__.__name__)
        pm(node)

    def parse_Document(self, node):
        self.parse(node.documentElement)

    def parse_Text(self, node):
        txt = node.data
        # Escape backslashes and quotes so the text survives inside the
        # generated SWIG string literal.
        txt = txt.replace('\\', r'\\\\')
        txt = txt.replace('"', r'\"')
        # ignore pure whitespace
        m = self.space_re.match(txt)
        if m and len(m.group()) == len(txt):
            pass
        else:
            self.add_text(textwrap.fill(txt, break_long_words=False))

    def parse_Element(self, node):
        """Parse an `ELEMENT_NODE`.  This calls specific
        `do_<tagName>` handers for different elements.  If no handler
        is available the `generic_parse` method is called.  All
        tagNames specified in `self.ignores` are simply ignored.
        """
        name = node.tagName
        ignores = self.ignores
        if name in ignores:
            return
        attr = "do_%s" % name
        if hasattr(self, attr):
            handlerMethod = getattr(self, attr)
            handlerMethod(node)
        else:
            self.generic_parse(node)
            #if name not in self.generics: self.generics.append(name)

    def parse_Comment(self, node):
        """Parse a `COMMENT_NODE`.  This does nothing for now."""
        return

    def add_text(self, value):
        """Adds text corresponding to `value` into `self.pieces`."""
        # Python 2 idiom: types.ListType/TupleType are list and tuple.
        if type(value) in (types.ListType, types.TupleType):
            self.pieces.extend(value)
        else:
            self.pieces.append(value)

    def get_specific_nodes(self, node, names):
        """Given a node and a sequence of strings in `names`, return a
        dictionary containing the names as keys and child
        `ELEMENT_NODEs`, that have a `tagName` equal to the name.
        """
        nodes = [(x.tagName, x) for x in node.childNodes \
                 if x.nodeType == x.ELEMENT_NODE and \
                 x.tagName in names]
        return dict(nodes)

    def generic_parse(self, node, pad=0):
        """A Generic parser for arbitrary tags in a node.
        Parameters:
         - node:  A node in the DOM.
         - pad: `int` (default: 0)
           If 0 the node data is not padded with newlines.  If 1 it
           appends a newline after parsing the childNodes.  If 2 it
           pads before and after the nodes are processed.  Defaults to
           0.
        """
        npiece = 0
        if pad:
            npiece = len(self.pieces)
            if pad == 2:
                self.add_text('\n')
        for n in node.childNodes:
            self.parse(n)
        if pad:
            if len(self.pieces) > npiece:
                self.add_text('\n')

    def space_parse(self, node):
        # Inline elements: emit a separating space, then recurse.
        self.add_text(' ')
        self.generic_parse(node)

    do_ref = space_parse
    do_emphasis = space_parse
    do_bold = space_parse
    do_computeroutput = space_parse
    do_formula = space_parse

    def do_compoundname(self, node):
        self.add_text('\n\n')
        data = node.firstChild.data
        #self.add_text('%%feature("docstring") %s "\n'%data)
        # C# variant: emit a csclassmodifiers typemap opening a /** comment.
        self.add_text('%%typemap("csclassmodifiers") %s "\n/**'%data)

    def do_compounddef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('class', 'struct'):
            prot = node.attributes['prot'].value
            # Python 2 '<>' is the same as '!='.
            if prot <> 'public':
                return
            names = ('compoundname', 'briefdescription',
                     'detaileddescription', 'includes')
            first = self.get_specific_nodes(node, names)
            for n in names:
                if first.has_key(n):
                    self.parse(first[n])
            #self.add_text(['";','\n'])
            self.add_text(['*/ public class";','\n'])
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
        elif kind in ('file', 'namespace'):
            nodes = node.getElementsByTagName('sectiondef')
            for n in nodes:
                self.parse(n)

    def do_includes(self, node):
        self.add_text('C++ includes: ')
        self.generic_parse(node, pad=1)

    def do_parameterlist(self, node):
        text='unknown'
        for key, val in node.attributes.items():
            if key == 'kind':
                if val == 'param': text = 'Parameters'
                elif val == 'exception': text = 'Exceptions'
                else: text = val
                break
        self.add_text(['\n', '\n', text, ':', '\n'])
        self.generic_parse(node, pad=1)

    def do_para(self, node):
        self.add_text('\n')
        self.generic_parse(node, pad=1)

    def do_parametername(self, node):
        self.add_text('\n')
        try:
            data=node.firstChild.data
        except AttributeError: # perhaps a <ref> tag in it
            data=node.firstChild.firstChild.data
        if data.find('Exception') != -1:
            self.add_text(data)
        else:
            self.add_text("%s: "%data)

    def do_parameterdefinition(self, node):
        self.generic_parse(node, pad=1)

    def do_detaileddescription(self, node):
        self.generic_parse(node, pad=1)

    def do_briefdescription(self, node):
        self.generic_parse(node, pad=1)

    def do_memberdef(self, node):
        prot = node.attributes['prot'].value
        id = node.attributes['id'].value
        kind = node.attributes['kind'].value
        tmp = node.parentNode.parentNode.parentNode
        compdef = tmp.getElementsByTagName('compounddef')[0]
        cdef_kind = compdef.attributes['kind'].value
        if prot == 'public':
            first = self.get_specific_nodes(node, ('definition', 'name'))
            name = first['name'].firstChild.data
            if name[:8] == 'operator': # Don't handle operators yet.
                return
            if not first.has_key('definition') or \
                   kind in ['variable', 'typedef']:
                return
            if self.include_function_definition:
                defn = first['definition'].firstChild.data
            else:
                defn = ""
            self.add_text('\n')
            #self.add_text('%feature("docstring") ')
            self.add_text('%csmethodmodifiers ')
            anc = node.parentNode.parentNode
            if cdef_kind in ('file', 'namespace'):
                ns_node = anc.getElementsByTagName('innernamespace')
                if not ns_node and cdef_kind == 'namespace':
                    ns_node = anc.getElementsByTagName('compoundname')
                if ns_node:
                    ns = ns_node[0].firstChild.data
                    self.add_text(' %s::%s "\n%s'%(ns, name, defn))
                else:
                    self.add_text(' %s "\n%s'%(name, defn))
            elif cdef_kind in ('class', 'struct'):
                # Get the full function name.
                anc_node = anc.getElementsByTagName('compoundname')
                cname = anc_node[0].firstChild.data
                self.add_text(' %s::%s "\n/**\n%s'%(cname, name, defn))
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
            self.add_text([' */\npublic";', '\n'])

    def do_definition(self, node):
        data = node.firstChild.data
        self.add_text('%s "\n%s'%(data, data))

    def do_sectiondef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('public-func', 'func', 'user-defined', ''):
            self.generic_parse(node)

    def do_header(self, node):
        """For a user defined section def a header field is present
        which should not be printed as such, so we comment it in the
        output."""
        data = node.firstChild.data
        self.add_text('\n/*\n %s \n*/\n'%data)
        # If our immediate sibling is a 'description' node then we
        # should comment that out also and remove it from the parent
        # node's children.
        parent = node.parentNode
        idx = parent.childNodes.index(node)
        if len(parent.childNodes) >= idx + 2:
            nd = parent.childNodes[idx+2]
            if nd.nodeName == 'description':
                nd = parent.removeChild(nd)
                self.add_text('\n/*')
                self.generic_parse(nd)
                self.add_text('\n*/\n')

    def do_simplesect(self, node):
        kind = node.attributes['kind'].value
        if kind in ('date', 'rcs', 'version'):
            pass
        elif kind == 'warning':
            self.add_text(['\n', 'WARNING: '])
            self.generic_parse(node)
        elif kind == 'see':
            self.add_text('\n')
            self.add_text('See: ')
            self.generic_parse(node)
        else:
            self.generic_parse(node)

    def do_argsstring(self, node):
        self.generic_parse(node, pad=1)

    def do_member(self, node):
        kind = node.attributes['kind'].value
        refid = node.attributes['refid'].value
        if kind == 'function' and refid[:9] == 'namespace':
            self.generic_parse(node)

    def do_doxygenindex(self, node):
        # An index file: recurse into every referenced compound XML file
        # (resolved relative to this file's directory when needed).
        self.multi = 1
        comps = node.getElementsByTagName('compound')
        for c in comps:
            refid = c.attributes['refid'].value
            fname = refid + '.xml'
            if not os.path.exists(fname):
                fname = os.path.join(self.my_dir,  fname)
            if not self.quiet:
                print "parsing file: %s"%fname
            p = Doxy2SWIG(fname, self.include_function_definition, self.quiet)
            p.generate()
            self.pieces.extend(self.clean_pieces(p.pieces))

    def write(self, fname):
        o = my_open_write(fname)
        if self.multi:
            # Pieces from sub-files were already cleaned in do_doxygenindex.
            o.write("".join(self.pieces))
        else:
            o.write("".join(self.clean_pieces(self.pieces)))
        o.close()

    def clean_pieces(self, pieces):
        """Cleans the list of strings given as `pieces`.  It replaces
        multiple newlines by a maximum of 2 and returns a new list.
        It also wraps the paragraphs nicely.
        """
        ret = []
        count = 0
        for i in pieces:
            if i == '\n':
                count = count + 1
            else:
                if i == '";':
                    if count:
                        ret.append('\n')
                elif count > 2:
                    ret.append('\n\n')
                elif count:
                    ret.append('\n'*count)
                count = 0
                ret.append(i)
        _data = "".join(ret)
        ret = []
        for i in _data.split('\n\n'):
            if i == 'Parameters:' or i == 'Exceptions:':
                ret.extend([i, '\n-----------', '\n\n'])
            elif i.find('// File:') > -1: # leave comments alone.
                ret.extend([i, '\n'])
            else:
                _tmp = textwrap.fill(i.strip(), break_long_words=False)
                _tmp = self.lead_spc.sub(r'\1"\2', _tmp)
                ret.extend([_tmp, '\n\n'])
        return ret
def convert(input, output, include_function_definition=True, quiet=False):
    """Convert doxygen XML *input* into SWIG docstring directives and
    write them to *output* (each may be a filename or a file object)."""
    p = Doxy2SWIG(input, include_function_definition, quiet)
    p.generate()
    p.write(output)
def main():
    """Command-line entry point: parse options and run the converter."""
    usage = __doc__
    parser = optparse.OptionParser(usage)
    parser.add_option("-n", '--no-function-definition',
                      action='store_true',
                      default=False,
                      dest='func_def',
                      help='do not include doxygen function definitions')
    parser.add_option("-q", '--quiet',
                      action='store_true',
                      default=False,
                      dest='quiet',
                      help='be quiet and minimise output')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error("error: no input and output specified")
    # Note the inversion: the -n flag *disables* function definitions.
    convert(args[0], args[1], not options.func_def, options.quiet)

if __name__ == '__main__':
    main()
| apache-2.0 |
tinchoss/Python_Android | python/src/Demo/tkinter/matt/canvas-reading-tag-info.py | 47 | 1678 | from Tkinter import *
class Test(Frame):
    """Tkinter demo frame: draws a polygon on a canvas and shows how to
    query a canvas *item* for its configuration options and tags."""

    def printit(self):
        # Unused demo helper -- kept for parity with the other examples.
        print "hi"

    def createWidgets(self):
        self.QUIT = Button(self, text='QUIT', foreground='red',
                           command=self.quit)
        self.QUIT.pack(side=BOTTOM, fill=BOTH)
        self.drawing = Canvas(self, width="5i", height="5i")
        # make a shape
        pgon = self.drawing.create_polygon(
            10, 10, 110, 10, 110, 110, 10 , 110,
            fill="red", tags=("weee", "foo", "groo"))
        # this is how you query an object for its attributes
        # config options FOR CANVAS ITEMS always come back in tuples of length 5.
        # 0 attribute name
        # 1 BLANK
        # 2 BLANK
        # 3 default value
        # 4 current value
        # the blank spots are for consistency with the config command that
        # is used for widgets. (remember, this is for ITEMS drawn
        # on a canvas widget, not widgets)
        option_value = self.drawing.itemconfig(pgon, "stipple")
        print "pgon's current stipple value is -->", option_value[4], "<--"
        option_value = self.drawing.itemconfig(pgon, "fill")
        print "pgon's current fill value is -->", option_value[4], "<--"
        print "    when he is usually colored -->", option_value[3], "<--"
        ## here we print out all the tags associated with this object
        option_value = self.drawing.itemconfig(pgon, "tags")
        print "pgon's tags are", option_value[4]
        self.drawing.pack(side=LEFT)

    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.createWidgets()
# Instantiate the demo frame and run the Tk event loop until the
# window is closed.
test = Test()
test.mainloop()
| apache-2.0 |
Early-Modern-OCR/Cobre | libros/urls.py | 1 | 1837 | import os.path
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
from haystack import forms, views, query
from libros.bookreader.models import Book
# Identifier of the form "prefix:collection:numeric.path".
# NOTE(review): the class [\S^:] matches any non-space OR '^' OR ':' --
# it does not *exclude* colons as the name suggests; confirm intent.
book_id_re = '[\S^:]+:[\S^:]+:[\d\./]+'
admin.autodiscover()
handler500 # Pyflakes
# NOTE(review): SearchQuerySet.models() returns a *new* queryset, so the
# result of the second line is discarded and ``sqs`` is never used below;
# confirm whether restricting search to Book was intended.
sqs = query.SearchQuerySet()
sqs.models(Book)
class SearchForm(forms.SearchForm):
    """Haystack search form that passes the user's query string through
    unescaped ("raw") to the search backend.

    NOTE(review): the queryset built by ``super().search()`` is discarded
    whenever the form validates, because ``raw_search`` builds a fresh
    one -- confirm this is intentional.
    """
    def search(self):
        sqs = super(SearchForm, self).search()
        if hasattr(self, 'cleaned_data'):
            self.clean()
        if self.is_valid() and hasattr(self, 'cleaned_data'):
            # Hand the raw query straight to the backend's query parser.
            sqs = self.searchqueryset.raw_search(self.cleaned_data['q'])
        if self.load_all:
            sqs = sqs.load_all()
        return sqs
# Haystack search view using the raw-query form above.
search_view = views.SearchView(template='search.html',
                               form_class=SearchForm)

urlpatterns = patterns('',
    #(r'^openid/', include('django_openid_auth.urls')),
    url(r'^search/$', search_view, name='haystack-search'),
    (r'^admin/', include(admin.site.urls)),
    (r'^login/$', 'django.contrib.auth.views.login'),
    (r'^logout/$', 'django.contrib.auth.views.logout'),
    # Static file serving via django.views.static.serve --
    # NOTE(review): intended for development only, not production.
    (r'^%s/(?P<path>.*)$' % (settings.MEDIA_URL.strip('/'),),
     'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    (r'^%s/(?P<path>.*)$' % (settings.THEME_URL.strip('/'),),
     'django.views.static.serve', {'document_root': settings.THEME_ROOT}),
    (r'^%s/(?P<path>.*)$' % (settings.ADMIN_MEDIA_PREFIX.strip('/'),),
     'django.views.static.serve',
     {'document_root': os.path.join(os.path.dirname(admin.__file__),'media')}),
    # Catch-all: delegate everything else to the bookreader app.
    (r'', include('libros.bookreader.urls')),
    url(r'^page/(?P<object_id>\d+)/facebox/$', 'libros.bookreader.views.page.view',
        kwargs={'template_name':'bookreader/page/facebox.html'},
        name='bookreader-page-facebox'),
)
| apache-2.0 |
emk/pyjamas | examples/showcase/src/demos_widgets/menubar.py | 13 | 2849 | """
The ``ui.MenuBar`` and ``ui.MenuItem`` classes allow you to define menu bars in
your application.
There are several important things to be aware of when adding menus to your
application:
* You have to use a stylesheet to define the look of your menu. The default
style is terrible, as it makes the menu unusable. The following stylesheet
entries were used for the example code below:
.gwt-MenuBar {
background-color: #C3D9FF;
border: 1px solid #87B3FF;
cursor: default;
}
.gwt-MenuBar .gwt-MenuItem {
padding: 1px 4px 1px 4px;
font-size: smaller;
cursor: default;
}
.gwt-MenuBar .gwt-MenuItem-selected {
background-color: #E8EEF7;
}
* By default, each menu item can be associated with a class, whose ``execute``
method will be called when that item is selected. Note that a helper class,
``MenuCmd``, is defined below to allow more than one menu item handler
method to be defined within a single class.
* You add menu items directly, passing the item label and the associated
command to ``MenuBar.addItem()``. For adding sub-menus, you need to wrap
the sub-menu up in a ``MenuItem``, as shown below.
* You can use HTML codes in a menu item's label by calling
``MenuBar.addItem(label, True, cmd)`` instead of ``MenuBar.addItem(label,
cmd)``. Similarly, you can use HTML styling in a menu's title by calling
``MenuItem(label, True, submenu)``, as in the second-to-last line of
``MenubarDemo.__init__``, below.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.MenuBar import MenuBar
from pyjamas.ui.MenuItem import MenuItem
from pyjamas import Window
class MenubarDemo(SimplePanel):
    """Showcase panel: a horizontal menu bar with two drop-down menus
    whose items pop up an alert when selected."""

    def __init__(self):
        SimplePanel.__init__(self)
        # First drop-down menu.
        menu1 = MenuBar(vertical=True)
        menu1.addItem("Item 1", MenuCmd(self, "onMenu1Item1"))
        menu1.addItem("Item 2", MenuCmd(self, "onMenu1Item2"))
        # Second drop-down menu.
        menu2 = MenuBar(vertical=True)
        menu2.addItem("Apples", MenuCmd(self, "onMenu2Apples"))
        menu2.addItem("Oranges", MenuCmd(self, "onMenu2Oranges"))
        # Horizontal bar holding both menus; the second title contains
        # HTML markup, hence the extra True (asHTML) argument.
        menubar = MenuBar(vertical=False)
        menubar.addItem(MenuItem("Menu 1", menu1))
        menubar.addItem(MenuItem("<i>Menu 2</i>", True, menu2))
        self.add(menubar)

    def onMenu1Item1(self):
        Window.alert("Item 1 selected")

    def onMenu1Item2(self):
        Window.alert("Item 2 selected")

    def onMenu2Apples(self):
        Window.alert("Apples selected")

    def onMenu2Oranges(self):
        Window.alert("Oranges selected")
class MenuCmd:
    """Command adapter: routes a menu selection to a named handler
    method on *object*, so one object can serve several menu items."""

    def __init__(self, object, handler):
        self._object = object
        self._handler = handler

    def execute(self):
        # Look the handler up lazily so it reflects the current object.
        getattr(self._object, self._handler)()
| apache-2.0 |
paulruvolo/ThinkStats2 | code/hinc2.py | 68 | 1622 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import hinc
import thinkplot
import thinkstats2
def InterpolateSample(df, log_upper=6.0):
    """Makes a sample of log10 household income.

    Assumes that log10 income is uniform in each range.

    Note: adds 'log_upper' and 'log_lower' columns to df as a side effect.

    df: DataFrame with columns income and freq
    log_upper: log10 of the assumed upper bound for the highest range

    returns: NumPy array of log10 household income
    """
    # compute the log10 of the upper bound for each range
    df['log_upper'] = np.log10(df.income)

    # get the lower bounds by shifting the upper bound and filling in
    # the first element (3.0 is the assumed log10 lower bound, i.e. $1000)
    df['log_lower'] = df.log_upper.shift(1)
    # Use .loc rather than chained assignment (df.log_lower[0] = ...),
    # which is unreliable under modern pandas copy semantics.
    df.loc[df.index[0], 'log_lower'] = 3.0

    # plug in a value for the unknown upper bound of the highest range;
    # address the last row instead of a hard-coded index (41) so this
    # works for any number of income ranges
    df.loc[df.index[-1], 'log_upper'] = log_upper

    # use the freq column to generate the right number of values in
    # each range (np.linspace requires an integer count)
    arrays = []
    for _, row in df.iterrows():
        vals = np.linspace(row.log_lower, row.log_upper, int(row.freq))
        arrays.append(vals)

    # collect the arrays into a single sample
    log_sample = np.concatenate(arrays)
    return log_sample
def main():
    """Read binned income data, interpolate a log10 sample, plot its CDF."""
    income_df = hinc.ReadData()
    log_sample = InterpolateSample(income_df, log_upper=6.0)
    income_cdf = thinkstats2.Cdf(log_sample)
    thinkplot.Cdf(income_cdf)
    thinkplot.Show(xlabel='household income',
                   ylabel='CDF')


if __name__ == "__main__":
    main()
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/324_test_asynchat.py | 89 | 9302 | # test asynchat
from test import support
# If this fails, the test will be skipped.
thread = support.import_module('_thread')
import asyncore, asynchat, socket, time
import unittest
import sys
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
SERVER_QUIT = b'QUIT\n'
if threading:
    class echo_server(threading.Thread):
        """Server thread: collects bytes from a single client connection
        until SERVER_QUIT is seen, then echoes everything back in
        chunk_size-byte pieces.
        """

        # parameter to determine the number of bytes passed back to the
        # client each send
        chunk_size = 1

        def __init__(self, event):
            threading.Thread.__init__(self)
            # Event set by run() once the socket is listening, so the main
            # thread knows it is safe to connect.
            self.event = event
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(self.sock)
            # This will be set if the client wants us to wait before echoing data
            # back.
            self.start_resend_event = None

        def run(self):
            """Accept one connection, buffer its data, then echo it back."""
            self.sock.listen(1)
            self.event.set()
            conn, client = self.sock.accept()
            self.buffer = b""
            # collect data until quit message is seen
            while SERVER_QUIT not in self.buffer:
                data = conn.recv(1)
                if not data:
                    break
                self.buffer = self.buffer + data

            # remove the SERVER_QUIT message
            self.buffer = self.buffer.replace(SERVER_QUIT, b'')

            if self.start_resend_event:
                # Tests such as test_close_when_done use this to delay the
                # echo until the client has closed its side.
                self.start_resend_event.wait()

            # re-send entire set of collected data
            try:
                # this may fail on some tests, such as test_close_when_done, since
                # the client closes the channel when it's done sending
                while self.buffer:
                    n = conn.send(self.buffer[:self.chunk_size])
                    time.sleep(0.001)
                    self.buffer = self.buffer[n:]
            except:
                pass

            conn.close()
            self.sock.close()
class echo_client(asynchat.async_chat):
    """Async chat client: records each terminator-delimited message it
    receives from the echo server into self.contents.
    """

    def __init__(self, terminator, server_port):
        asynchat.async_chat.__init__(self)
        self.contents = []
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((HOST, server_port))
        self.set_terminator(terminator)
        # Partial data accumulated until the terminator is seen.
        self.buffer = b""

    def handle_connect(self):
        pass

    if sys.platform == 'darwin':
        # select.poll returns a select.POLLHUP at the end of the tests
        # on darwin, so just ignore it
        def handle_expt(self):
            pass

    def collect_incoming_data(self, data):
        # Called by async_chat with each chunk read from the socket.
        self.buffer += data

    def found_terminator(self):
        # A complete message has arrived; record it and reset the buffer.
        self.contents.append(self.buffer)
        self.buffer = b""
def start_echo_server():
    """Start an echo_server thread and wait until it is ready to accept.

    Returns the (server, event) pair; the event is cleared before return
    so callers can reuse it.
    """
    ready = threading.Event()
    server = echo_server(ready)
    server.start()
    ready.wait()
    ready.clear()
    # Brief pause so the listening socket is actually accepting.
    time.sleep(0.01)
    return server, ready
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestAsynchat(unittest.TestCase):
    """Exercises asynchat.async_chat against a threaded echo server."""

    # Subclasses flip this to exercise the select.poll()-based event loop.
    usepoll = False

    def setUp (self):
        self._threads = support.threading_setup()

    def tearDown (self):
        support.threading_cleanup(*self._threads)

    def line_terminator_check(self, term, server_chunk):
        """Push two lines ending in *term*; the server echoes them back in
        *server_chunk*-byte pieces; verify both lines arrive intact."""
        event = threading.Event()
        s = echo_server(event)
        s.chunk_size = server_chunk
        s.start()
        event.wait()
        event.clear()
        time.sleep(0.01) # Give server time to start accepting.
        c = echo_client(term, s.port)
        c.push(b"hello ")
        c.push(b"world" + term)
        c.push(b"I'm not dead yet!" + term)
        c.push(SERVER_QUIT)
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
        s.join()

        self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])

    # the line terminator tests below check receiving variously-sized
    # chunks back from the server in order to exercise all branches of
    # async_chat.handle_read

    def test_line_terminator1(self):
        # test one-character terminator
        for l in (1,2,3):
            self.line_terminator_check(b'\n', l)

    def test_line_terminator2(self):
        # test two-character terminator
        for l in (1,2,3):
            self.line_terminator_check(b'\r\n', l)

    def test_line_terminator3(self):
        # test three-character terminator
        for l in (1,2,3):
            self.line_terminator_check(b'qqq', l)

    def numeric_terminator_check(self, termlen):
        # Try reading a fixed number of bytes
        s, event = start_echo_server()
        c = echo_client(termlen, s.port)
        data = b"hello world, I'm not dead yet!\n"
        c.push(data)
        c.push(SERVER_QUIT)
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
        s.join()

        # Only the first termlen bytes form a "complete" message.
        self.assertEqual(c.contents, [data[:termlen]])

    def test_numeric_terminator1(self):
        # check that ints & longs both work (since type is
        # explicitly checked in async_chat.handle_read)
        self.numeric_terminator_check(1)

    def test_numeric_terminator2(self):
        self.numeric_terminator_check(6)

    def test_none_terminator(self):
        # Try reading a fixed number of bytes
        s, event = start_echo_server()
        c = echo_client(None, s.port)
        data = b"hello world, I'm not dead yet!\n"
        c.push(data)
        c.push(SERVER_QUIT)
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
        s.join()

        # With no terminator, nothing is ever "complete": all data stays
        # in the client's buffer and contents remains empty.
        self.assertEqual(c.contents, [])
        self.assertEqual(c.buffer, data)

    def test_simple_producer(self):
        s, event = start_echo_server()
        c = echo_client(b'\n', s.port)
        data = b"hello world\nI'm not dead yet!\n"
        p = asynchat.simple_producer(data+SERVER_QUIT, buffer_size=8)
        c.push_with_producer(p)
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
        s.join()

        self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])

    def test_string_producer(self):
        s, event = start_echo_server()
        c = echo_client(b'\n', s.port)
        data = b"hello world\nI'm not dead yet!\n"
        c.push_with_producer(data+SERVER_QUIT)
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
        s.join()

        self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])

    def test_empty_line(self):
        # checks that empty lines are handled correctly
        s, event = start_echo_server()
        c = echo_client(b'\n', s.port)
        c.push(b"hello world\n\nI'm not dead yet!\n")
        c.push(SERVER_QUIT)
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
        s.join()

        self.assertEqual(c.contents,
                         [b"hello world", b"", b"I'm not dead yet!"])

    def test_close_when_done(self):
        s, event = start_echo_server()
        s.start_resend_event = threading.Event()
        c = echo_client(b'\n', s.port)
        c.push(b"hello world\nI'm not dead yet!\n")
        c.push(SERVER_QUIT)
        c.close_when_done()
        asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)

        # Only allow the server to start echoing data back to the client after
        # the client has closed its connection.  This prevents a race condition
        # where the server echoes all of its data before we can check that it
        # got any down below.
        s.start_resend_event.set()
        s.join()

        self.assertEqual(c.contents, [])

        # the server might have been able to send a byte or two back, but this
        # at least checks that it received something and didn't just fail
        # (which could still result in the client not having received anything)
        self.assertGreater(len(s.buffer), 0)
class TestAsynchat_WithPoll(TestAsynchat):
    """Re-runs the whole TestAsynchat suite using select.poll()."""
    usepoll = True
class TestHelperFunctions(unittest.TestCase):
    def test_find_prefix_at_end(self):
        """find_prefix_at_end returns the length of the longest proper
        prefix of the needle found at the end of the haystack.

        NOTE(review): called with str arguments here although async_chat
        itself works on bytes in 3.x; the helper is type-agnostic.
        """
        self.assertEqual(asynchat.find_prefix_at_end("qwerty\r", "\r\n"), 1)
        self.assertEqual(asynchat.find_prefix_at_end("qwertydkjf", "\r\n"), 0)
class TestFifo(unittest.TestCase):
    """Tests the asynchat.fifo queue helper."""

    def test_basic(self):
        # pop() returns (1, item) while non-empty and (0, None) once drained.
        f = asynchat.fifo()
        f.push(7)
        f.push(b'a')
        self.assertEqual(len(f), 2)
        self.assertEqual(f.first(), 7)
        self.assertEqual(f.pop(), (1, 7))
        self.assertEqual(len(f), 1)
        self.assertEqual(f.first(), b'a')
        self.assertEqual(f.is_empty(), False)
        self.assertEqual(f.pop(), (1, b'a'))
        self.assertEqual(len(f), 0)
        self.assertEqual(f.is_empty(), True)
        self.assertEqual(f.pop(), (0, None))

    def test_given_list(self):
        # A fifo can be seeded with an existing list of items.
        f = asynchat.fifo([b'x', 17, 3])
        self.assertEqual(len(f), 3)
        self.assertEqual(f.pop(), (1, b'x'))
        self.assertEqual(f.pop(), (1, 17))
        self.assertEqual(f.pop(), (1, 3))
        self.assertEqual(f.pop(), (0, None))
def test_main(verbose=None):
    """Entry point used by regrtest: run all asynchat test cases."""
    support.run_unittest(TestAsynchat, TestAsynchat_WithPoll,
                         TestHelperFunctions, TestFifo)

if __name__ == "__main__":
    test_main(verbose=True)
| gpl-3.0 |
gautamMalu/rootfs_xen_arndale | usr/lib/python3.4/encodings/koi8_r.py | 272 | 13779 | """ Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless KOI8-R codec built on the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk encodes independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte encoding: every byte maps to one character, so no
    # decoder state is needed between chunks.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits the stateless encode from Codec; nothing to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits the stateless decode from Codec; nothing to add.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo the codecs registry uses for 'koi8-r'."""
    return codecs.CodecInfo(
        name='koi8-r',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u2580' # 0x8B -> UPPER HALF BLOCK
'\u2584' # 0x8C -> LOWER HALF BLOCK
'\u2588' # 0x8D -> FULL BLOCK
'\u258c' # 0x8E -> LEFT HALF BLOCK
'\u2590' # 0x8F -> RIGHT HALF BLOCK
'\u2591' # 0x90 -> LIGHT SHADE
'\u2592' # 0x91 -> MEDIUM SHADE
'\u2593' # 0x92 -> DARK SHADE
'\u2320' # 0x93 -> TOP HALF INTEGRAL
'\u25a0' # 0x94 -> BLACK SQUARE
'\u2219' # 0x95 -> BULLET OPERATOR
'\u221a' # 0x96 -> SQUARE ROOT
'\u2248' # 0x97 -> ALMOST EQUAL TO
'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
'\xa0' # 0x9A -> NO-BREAK SPACE
'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
'\xb0' # 0x9C -> DEGREE SIGN
'\xb2' # 0x9D -> SUPERSCRIPT TWO
'\xb7' # 0x9E -> MIDDLE DOT
'\xf7' # 0x9F -> DIVISION SIGN
'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa9' # 0xBF -> COPYRIGHT SIGN
'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
# Inverse mapping (character -> KOI8-R byte) derived from decoding_table.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
opendaylight/opendove | odcs/data_handler/python/dcs_objects/DPSNodeStatistics.py | 1 | 26790 | '''
@copyright (c) 2010-2013 IBM Corporation
All rights reserved.
This program and the accompanying materials are made available under the
terms of the Eclipse Public License v1.0 which accompanies this
distribution, and is available at http://www.eclipse.org/legal/epl-v10.html
@author: Amitabha Biswas
'''
import logging
import time
#from client_protocol_handler import DpsClientHandler
from object_collection import DpsLogLevels
from logging import getLogger
log = getLogger(__name__)
import dcslib
class DPSNodeStatValues:
    '''
    This represent the weight and values of each statistical category.
    Currently the MAJOR statistical categories are:
    1. Memory Consumed
    2. CPU utilization
    The statistical weight of a combination of F1, F2, F3
    where the weight of Fn = Wn is computed as
    (F1*W1 + F2*W2 + .... Fk*Wk)/(W1 + W2 + ... + Wk)
    The computation is in terms of 100%.
    Note that the Limit is not 100% i.e. it's possible for a Node to
    be above 100%. This is due to the facts that the maximum possible
    values are really plucked out of the air. Those max values can
    vary based on system capability.
    '''
    # Keys naming the statistical categories (used in the weight dicts).
    CPU = 'CPU'
    Memory = 'Memory'
    Domains = 'Domains'
    #Weights attributed to each entity for a Node
    Domain_Average_Per_Node = 500
    Node_Weight_Set = {CPU: 25, Memory: 50, Domains: 25}
    Node_Weight_Total = sum(Node_Weight_Set.values())
    #Weights attributed to each entity for a Domain in a Node
    Node_Domain_Weight_Set = {CPU: 34, Memory: 66}
    Node_Domain_Weight_Total = sum(Node_Domain_Weight_Set.values())
    #CPU Limits per Sec (assumed maximum operation rates; see class docstring
    # about these being rough estimates rather than measured maxima)
    Cpu_Limit_Endpoint_Update_Per_Sec = 5000
    Cpu_Limit_Endpoint_Lookup_Per_Sec = 100000
    Cpu_Limit_Policy_Lookup_Per_Sec = 50000
    #Memory Limits Overall (assumed per-node capacities)
    Memory_Limit_Endpoints = 50000
    Memory_Limit_Tunnels = 5000
    #Heavy Load (Percentage: 100%)
    Heavy_Load_Default = 60
    Heavy_Load = 60
class DPSNodeDomainStatistics:
    '''
    This represents the Statistics of a domain on a node
    '''
    # The CPU utilization values are stored in a circular array with 2 rows.
    # The current index of the row represents the latest values. In the example
    # below row 1 (with the latest) time is the current index.
    # The values in the row represents the number of operations between
    # Time Start and Time End in a row.
    # The reason we need 2 rows is because when a row moves forward e.g. from
    # array index 0 --> 1, the values in that new row are 0s, so for some time
    # we still need the old row 0 to compute the values.
    # --------------------------------------------------------------
    # | Index  | Time Start | Time End | Update | Lookup | Policy  |
    # --------------------------------------------------------------
    # |   0    |     T      |   T+1    | U(T+1) | L(T+1) | P(T+1)  |
    # --------------------------------------------------------------
    # |   1    |    T+1     |  latest  |U(late.)|L(late.)|P(late.) |
    # --------------------------------------------------------------
    # Column offsets into each cpu_array row.
    cpu_time_start_column = 0
    cpu_time_end_column = 1
    cpu_update_column = 2
    cpu_lookup_column = 3
    cpu_policy_column = 4
    cpu_max_time_diff_row = 120 #2 minutes between Time Start and End.
                                #after 2 minutes move to next row

    def __init__(self, domain_id):
        '''
        Constructor:
        @param domain_id: The Domain ID
        @type domain_id: Integer
        '''
        self.domain_id = domain_id
        #(Memory) counts reported by the node for this domain
        self.endpoints = 0
        self.tunnels = 0
        #(CPU) Array: two-row circular buffer described above
        self.cpu_array = []
        curr_time = time.time()
        self.cpu_array.append([curr_time, curr_time, 0, 0, 0])
        self.cpu_array.append([curr_time, curr_time, 0, 0, 0])
        #[time.time(), time.time(), 0, 0, 0]]
        self.cpu_index = 0

    def update_statistics(self, endpoints, tunnels, update_delta, lookup_delta, policy_delta):
        '''
        This routine updates the domain statistics in a Node
        @param endpoints: The Number of Endpoints in that Domain
        @type endpoints: Integer
        @param tunnels: The Number of Tunnels in that Domain
        @type tunnels: Integer
        @param update_delta: The Number of Updates that were done since the last update
        @type update_delta: Integer
        @param lookup_delta: The Number of Lookups that were done since the last update
        @type lookup_delta: Integer
        @param policy_delta: The Number of Policy Lookups that were done since the last update
        @type policy_delta: Integer
        '''
        #print 'Domain [%s]: Update\r'%self.domain_id
        cpu_row = self.cpu_array[self.cpu_index]
        self.endpoints = endpoints
        self.tunnels = tunnels
        curr_time = time.time()
        curr_index_start_time = cpu_row[self.cpu_time_start_column]
        if curr_time - curr_index_start_time > self.cpu_max_time_diff_row:
            #Need to move to next index (rotate to the other row and reset
            #its counters; the old row is retained for load_cpu)
            if self.cpu_index == 0:
                self.cpu_index = 1
            else:
                self.cpu_index = 0
            cpu_row = self.cpu_array[self.cpu_index]
            cpu_row[self.cpu_time_start_column] = curr_time
            cpu_row[self.cpu_update_column] = 0
            cpu_row[self.cpu_lookup_column] = 0
            cpu_row[self.cpu_policy_column] = 0
        #Update the current row
        #print 'Domain [%s]: cpu_index %s, row %s\r'%(self.domain_id, self.cpu_index, cpu_row)
        cpu_row[self.cpu_update_column] += update_delta
        cpu_row[self.cpu_lookup_column] += lookup_delta
        cpu_row[self.cpu_policy_column] += policy_delta
        cpu_row[self.cpu_time_end_column] = curr_time
        #print 'Domain [%s]: cpu_index %s, row %s\r'%(self.domain_id, self.cpu_index, cpu_row)
        #self.load_show()
        #print 'update_statistics: Exit self.domain_id %s\r'%self.domain_id
        return

    def load_cpu(self):
        '''
        This routine determines the cpu load in (numerical) value on a domain on a
        node
        @return - The load
        @rtype - Float
        '''
        curr_index = self.cpu_index
        if curr_index == 0:
            prev_index = 1
        else:
            prev_index = 0
        curr_row = self.cpu_array[curr_index]
        prev_row = self.cpu_array[prev_index]
        #Get CPU load. The while True/break is used as a structured "goto
        #end" so all early exits fall through to the single return.
        cpu_load = float(0)
        while True:
            time_diff = curr_row[self.cpu_time_end_column] - prev_row[self.cpu_time_start_column]
            if time_diff <= 0:
                break
            #Endpoint Update: ops across both rows, as a percentage of the
            #assumed per-second maximum, averaged over the covered interval
            eu_load = float(curr_row[self.cpu_update_column] + prev_row[self.cpu_update_column])
            eu_load = (eu_load * 100)/float(DPSNodeStatValues.Cpu_Limit_Endpoint_Update_Per_Sec)
            eu_load = eu_load/time_diff
            #Endpoint Lookup
            el_load = float(curr_row[self.cpu_lookup_column] + prev_row[self.cpu_lookup_column])
            # print 'Domain [%s]: Endpoint Lookup curr_row %s [%s], prev_row %s [%s]\r'%(self.domain_id,
            #                                                        curr_index,
            #                                                        curr_row[self.cpu_lookup_column],
            #                                                        prev_index,
            #                                                        prev_row[self.cpu_lookup_column])
            el_load = (el_load * 100)/float(DPSNodeStatValues.Cpu_Limit_Endpoint_Lookup_Per_Sec)
            el_load = el_load/time_diff
            #print 'Domain [%s]: Endpoint Lookup, Time Diff %s (secs), Load %s\r'%(self.domain_id, time_diff, el_load)
            #Policy Lookup
            pl_load = float(curr_row[self.cpu_policy_column] + prev_row[self.cpu_policy_column])
            pl_load = (pl_load * 100)/float(DPSNodeStatValues.Cpu_Limit_Policy_Lookup_Per_Sec)
            pl_load = pl_load/time_diff
            #Don't divide by 3 here since each is a max possible value on the system
            cpu_load = eu_load + el_load + pl_load
            #print 'Domain [%s]: CPU Load %s\r'%(self.domain_id, cpu_load)
            break
        return cpu_load

    def load_memory(self):
        '''
        This routine determines the memory load in (numerical) value on a domain on a
        node
        @return - The load
        @rtype - Float
        '''
        #Endpoint count as a percentage of the assumed node capacity
        em_load = float(self.endpoints*100)/float(DPSNodeStatValues.Memory_Limit_Endpoints)
        #Tunnels
        tm_load = float(self.tunnels*100)/float(DPSNodeStatValues.Memory_Limit_Tunnels)
        #Divide by 2 here since endpoint and tunnels are by themselves not max values
        memory_load = (em_load + tm_load)/2
        return memory_load

    def load(self):
        '''
        This routine determines the load in (numerical) value on a domain on a
        node
        @return - The load
        @rtype - Float
        '''
        cpu_load = self.load_cpu()
        memory_load = self.load_memory()
        #Get Total Load: weighted average per DPSNodeStatValues weights
        load = ((cpu_load * DPSNodeStatValues.Node_Domain_Weight_Set[DPSNodeStatValues.CPU]) +
                (memory_load * DPSNodeStatValues.Node_Domain_Weight_Set[DPSNodeStatValues.Memory]))
        load = load/DPSNodeStatValues.Node_Domain_Weight_Total
        return load

    def load_show(self):
        '''
        This routine shows the load on the domain
        '''
        cpu_load = self.load_cpu()
        memory_load = self.load_memory()
        #Get Total Load (same computation as load(); printed for debugging)
        load = ((cpu_load * DPSNodeStatValues.Node_Domain_Weight_Set[DPSNodeStatValues.CPU]) +
                (memory_load * DPSNodeStatValues.Node_Domain_Weight_Set[DPSNodeStatValues.Memory]))
        load = load/DPSNodeStatValues.Node_Domain_Weight_Total
        print 'Domain %s: CPU %.2f, Memory %.2f, Total(Amortized) %.2f\r'%(self.domain_id,
                                                                           cpu_load,
                                                                           memory_load,
                                                                           load)
        return

    @staticmethod
    def load_min(NodeDomain1, NodeDomain2):
        '''
        This method determines the lesser loaded of the 2 domains
        @param nodedomain1: The Domain1 on Node
        @type nodedomain1: DPSNodeDomainStatistics
        @param nodedomain2: The Domain2 on Node
        @type nodedomain2: DPSNodeDomainStatistics
        '''
        load1 = NodeDomain1.load()
        load2 = NodeDomain2.load()
        if load1 > load2:
            return NodeDomain2
        else:
            return NodeDomain1

    @staticmethod
    def load_max(NodeDomain1, NodeDomain2):
        '''
        This method determines the higher loaded of the 2 domains
        @param nodedomain1: The Domain1 on Node
        @type nodedomain1: DPSNodeDomainStatistics
        @param nodedomain2: The Domain2 on Node
        @type nodedomain2: DPSNodeDomainStatistics
        '''
        load1 = NodeDomain1.load()
        load2 = NodeDomain2.load()
        if load1 > load2:
            return NodeDomain1
        else:
            return NodeDomain2

    @staticmethod
    def load_min_array(NodeDomains):
        '''
        This gets the lowest loaded Domain in the Array
        '''
        lowest = None
        for nd in NodeDomains:
            if lowest is None:
                lowest = nd
                continue
            lowest = DPSNodeDomainStatistics.load_min(lowest, nd)
        return lowest

    @staticmethod
    def load_max_array(NodeDomains):
        '''
        This gets the lowest loaded Domain in the Array
        '''
        # NOTE(review): docstring says "lowest" but this returns the
        # HIGHEST loaded domain, matching the method name.
        highest = None
        for nd in NodeDomains:
            if highest is None:
                highest = nd
                continue
            highest = DPSNodeDomainStatistics.load_max(highest, nd)
        return highest

    @staticmethod
    def load_array(NodeDomains):
        '''
        This method computes the cumulative load of all the domains in the node
        @param NodeDomains: List of DPSNodeDomainStatistics
        @type NodeDomains: [DPSNodeDomainStatistics]
        @return: The Load (CPU+Memory+Domains) on the List
        @rtype: float
        '''
        load_cpu = float(0)
        load_memory = float(0)
        for nd in NodeDomains:
            load_cpu += nd.load_cpu()
            load_memory += nd.load_memory()
        # Domain-count load: number of domains vs the assumed average per node.
        load_domain = (float(len(NodeDomains))*100)/float(DPSNodeStatValues.Domain_Average_Per_Node)
        #Get Total Load
        load = ((load_cpu * DPSNodeStatValues.Node_Weight_Set[DPSNodeStatValues.CPU]) +
                (load_memory * DPSNodeStatValues.Node_Weight_Set[DPSNodeStatValues.Memory]) +
                (load_domain * DPSNodeStatValues.Node_Weight_Set[DPSNodeStatValues.Domains]))/(DPSNodeStatValues.Node_Weight_Total)
        return load
class DPSNodeStatistics:
    '''
    This class represents the statistics and load of a DPS Node.
    Load figures are computed by DPSNodeDomainStatistics.load_array() over
    the node's per-domain statistics.
    '''

    def __init__(self, location):
        '''
        Constructor
        @param location: The Location of the DPS Node
        @type location: IPAddressLocation
        '''
        self.location = location
        # Per-domain statistics: domain_id -> DPSNodeDomainStatistics
        self.domain_statistics = {}

    def domain_add(self, domain_id):
        '''
        This routine adds a domain to the DPSNodeStatistics.
        Adding an already-present domain is a no-op (existing stats are kept).
        @param domain_id: The Domain ID
        @type domain_id: Integer
        '''
        try:
            domain_stats = self.domain_statistics[domain_id]
        except Exception:
            # Domain not tracked yet: create a fresh statistics record.
            domain_stats = DPSNodeDomainStatistics(domain_id)
            self.domain_statistics[domain_id] = domain_stats
        return

    def domain_delete(self, domain_id):
        '''
        Remove a domain from this node's statistics; silently ignores
        unknown domain ids.
        @param domain_id: The Domain ID
        @type domain_id: Integer
        '''
        try:
            del self.domain_statistics[domain_id]
        except Exception:
            pass
        return

    def domain_delete_all(self):
        '''
        Delete all the Domains
        '''
        self.domain_statistics.clear()
        return

    def domains_get(self):
        '''
        This returns the list of domain ids
        '''
        return self.domain_statistics.keys()

    def update_statistics(self, domain_id, endpoints, tunnels, update_delta, lookup_delta, policy_delta):
        '''
        This routine updates the domain statistics in a Node.
        Updates for domains not tracked on this node are silently dropped.
        @param domain_id: The Domain ID
        @type domain_id: Integer
        @param endpoints: The Number of Endpoints in that Domain
        @type endpoints: Integer
        @param tunnels: The Number of Tunnels in that Domain
        @type tunnels: Integer
        @param update_delta: The Number of Updates that were done since the last update
        @type update_delta: Integer
        @param lookup_delta: The Number of Lookups that were done since the last update
        @type lookup_delta: Integer
        @param policy_delta: The Number of Policy Lookups that were done since the last update
        @type policy_delta: Integer
        '''
        try:
            domain_stats = self.domain_statistics[domain_id]
            domain_stats.update_statistics(endpoints, tunnels, update_delta, lookup_delta, policy_delta)
        except Exception:
            pass
        return

    def heavy_load(self):
        '''
        This routine returns if this node is heavily loaded.
        @return: (heavy, load) where heavy is True if the node load exceeds
                 DPSNodeStatValues.Heavy_Load, and load is the computed load
        @rtype: (Boolean, load)
        '''
        my_load = DPSNodeDomainStatistics.load_array(self.domain_statistics.values())
        message = 'Node %s, Load %s'%(self.location.show_ip(), my_load)
        dcslib.dps_cluster_write_log(DpsLogLevels.NOTICE, message)
        if my_load > DPSNodeStatValues.Heavy_Load:
            return (True, my_load)
        else:
            return (False, my_load)

    @staticmethod
    def load_min(node1, node2):
        '''
        This routine determine the lower loaded node of the 2 nodes.
        On a tie, node2 is returned.
        @param node1: Node1
        @type node1: DPSNodeStatistics
        @param node2: Node2
        @type node2: DPSNodeStatistics
        @return: The lower loaded of the 2 nodes
        @rtype: DPSNodeStatistics
        '''
        load_node1 = DPSNodeDomainStatistics.load_array(node1.domain_statistics.values())
        load_node2 = DPSNodeDomainStatistics.load_array(node2.domain_statistics.values())
        if load_node1 < load_node2:
            return node1
        else:
            return node2

    @staticmethod
    def load_max(node1, node2):
        '''
        This routine determine the higher loaded node of the 2 nodes.
        On a tie, node2 is returned.
        @param node1: Node1
        @type node1: DPSNodeStatistics
        @param node2: Node2
        @type node2: DPSNodeStatistics
        @return: The higher loaded of the 2 nodes
        @rtype: DPSNodeStatistics
        '''
        load_node1 = DPSNodeDomainStatistics.load_array(node1.domain_statistics.values())
        load_node2 = DPSNodeDomainStatistics.load_array(node2.domain_statistics.values())
        if load_node1 > load_node2:
            return node1
        else:
            return node2

    @staticmethod
    def load_min_array(nodes):
        '''
        This routine determines the lowest loaded node in the array of nodes.
        Returns None when the array is empty.
        @param nodes: Array of DPSNodeStatistics
        @type nodes:[DPSNodeStatistics]
        @return: The lowest loaded node in the array
        @rtype: DPSNodeStatistics
        '''
        lowest = None
        for node in nodes:
            if lowest is None:
                lowest = node
                continue
            lowest = DPSNodeStatistics.load_min(lowest, node)
        return lowest

    @staticmethod
    def load_max_array(nodes):
        '''
        This routine determines the highest loaded node in the array of nodes.
        Returns None when the array is empty.
        @param nodes: Array of DPSNodeStatistics
        @type nodes:[DPSNodeStatistics]
        @return: The highest loaded node in the array
        @rtype: DPSNodeStatistics
        '''
        highest = None
        for node in nodes:
            if highest is None:
                highest = node
                continue
            highest = DPSNodeStatistics.load_max(highest, node)
        return highest

    @staticmethod
    def load_min_max_array(nodes):
        '''
        This routine determines the lowest AND highest loaded nodes in the
        array of nodes (single pass). Both are None for an empty array.
        @param nodes: Array of DPSNodeStatistics
        @type nodes:[DPSNodeStatistics]
        @return: lowest and highest loaded node in the array
        @rtype: (DPSNodeStatistics, DPSNodeStatistics)
        '''
        highest = None
        lowest = None
        for node in nodes:
            if highest is None:
                highest = node
            else:
                highest = DPSNodeStatistics.load_max(highest, node)
            if lowest is None:
                lowest = node
            else:
                lowest = DPSNodeStatistics.load_min(lowest, node)
        return (lowest, highest)

    @staticmethod
    def load_available_nodes(nodes, num, fhigh):
        '''
        This routine determines the lowest or highest loaded nodes in the array
        @param nodes: Array of DPSNodeStatistics
        @type nodes:[DPSNodeStatistics]
        @param num: The number of nodes needed
        @type num: Integer
        @param fhigh: If this value is True, then this routine will return
                      Highest Loaded nodes otherwise the Lowest Loaded Nodes
        @type fhigh: Boolean
        @return: List of loaded nodes - maximum number "num"
        @rtype:[ip_value1, ip_value2]...
        '''
        #The set of nodes index by Load
        Node_Array = []
        Load_Set = {}
        for node in nodes:
            load = DPSNodeDomainStatistics.load_array(node.domain_statistics.values())
            Load_Set[node.location.ip_value] = load
        #Sort the Dictionary based on Load i.e. value
        # (value, key) tuples sort primarily by load; reverse=True when the
        # highest-loaded nodes were requested.
        Load_Node_Array = sorted([(value,key) for (key,value) in Load_Set.items()],reverse=fhigh)
        for i in range(num):
            try:
                load_tuple = Load_Node_Array[i]
                Node_Array.append(load_tuple[1])
            except Exception:
                # Fewer than 'num' nodes available: return what we have.
                break
        return Node_Array

    @staticmethod
    def load_available_nodes_packed(nodes, num, fhigh):
        '''
        This routine determines the lowest or highest loaded nodes in the
        array. Identical to load_available_nodes() except that nodes are
        identified by their packed IP value.
        @param nodes: Array of DPSNodeStatistics
        @type nodes:[DPSNodeStatistics]
        @param num: The number of nodes needed
        @type num: Integer
        @param fhigh: If this value is True, then this routine will return
                      Highest Loaded nodes otherwise the Lowest Loaded Nodes
        @type fhigh: Boolean
        @return: List of loaded nodes - maximum number "num"
        @rtype:[ip_packed1, ip_packed2]...
        '''
        #The set of nodes index by Load
        Node_Array = []
        Load_Set = {}
        for node in nodes:
            load = DPSNodeDomainStatistics.load_array(node.domain_statistics.values())
            Load_Set[node.location.ip_value_packed] = load
        #Sort the Dictionary based on Load i.e. value
        Load_Node_Array = sorted([(value,key) for (key,value) in Load_Set.items()],reverse=fhigh)
        for i in range(num):
            try:
                load_tuple = Load_Node_Array[i]
                Node_Array.append(load_tuple[1])
            except Exception:
                break
        return Node_Array

    @staticmethod
    def load_balance_domain(node_high, node_low):
        '''
        This routine determines which domain can be moved from node1 to
        node2 or vice-versa that will minimize the load differential
        between the two i.e. minimize |load(node_high) - load(node_low)|
        @param node_high: The Higher Loaded Node
        @type node_high: DPSNodeStatistics
        @param node_low: The Lower Loaded Node
        @type node_low: DPSNodeStatistics
        @return: list of domain ids to move (despite the @return name below)
        @rtype: [Integer]
        @raise: Exception if node_high is already lower than node_low
                or no domain is found
        '''
        #print 'load_balance_domain: Enter High %s, Low %s\r'%(node_high.location.show_ip(),
        #                                                      node_low.location.show_ip())
        load_high = DPSNodeDomainStatistics.load_array(node_high.domain_statistics.values())
        load_low = DPSNodeDomainStatistics.load_array(node_low.domain_statistics.values())
        #print 'load_balance_domain: load_high %s, load_low %s\r'%(load_high, load_low)
        if load_high <= load_low:
            raise Exception('Load on Node %s[%.2f], already lower than Node %s[%.2f]'%(node_high.location.show_ip(),
                                                                                       load_high,
                                                                                       node_low.location.show_ip(),
                                                                                       load_low))
        domain_id = None
        # Candidate domains to move: node_domain -> resulting load differential
        domain_ids = {}
        for node_domain in node_high.domain_statistics.values():
            #Check if this domain is present in the lower node
            domain_id = node_domain.domain_id
            try:
                node_domain_low = node_low.domain_statistics[domain_id]
                # Domain already replicated on the low node: cannot move it.
                continue
            except Exception:
                pass
            #Determine the Load if this domain moved from node_high to node_low
            load_domain = node_domain.load()
            #print 'Loop: domain %s, load %s\r'%(node_domain.domain_id, load_domain)
            if load_domain > load_high:
                message = 'Coding Error? Load on domain %s[%.2f] on node %s higher the load on node %.2f'%(node_domain.domain_id,
                                                                                                           load_domain,
                                                                                                           node_high.location.show_ip(),
                                                                                                           load_high,
                                                                                                           )
                dcslib.dps_cluster_write_log(DpsLogLevels.WARNING, message)
                continue
            load_high_new = load_high - load_domain
            load_low_new = load_low + load_domain
            load_diff_new = abs(load_high_new - load_low_new)
            domain_ids[node_domain] = load_diff_new
        # NOTE(review): domain_id is (re)assigned for every domain above, even
        # skipped ones, so this guard only fires when node_high has no domains
        # at all; the empty-candidate case is caught by the domain_list check
        # at the end instead.
        if domain_id is None:
            raise Exception('No suitable domain found')
        #Sort the List based on values
        # NOTE(review): ties on the load differential fall back to comparing
        # the DPSNodeDomainStatistics objects themselves (legal in Python 2).
        domain_tuples = sorted([(value,key) for (key,value) in domain_ids.items()])
        #Only chose the 10 differential lowest domains i.e. the domains that cause max swing in load diff
        domain_tuples = domain_tuples[:10]
        print 'domain_tuples: %s\r'%domain_tuples
        domain_list = []
        load_high_new = load_high
        load_low_new = load_low
        for domain_tuple in domain_tuples:
            node_domain = domain_tuple[1]
            domain_id = node_domain.domain_id
            load_domain = node_domain.load()
            # Simulate moving this domain and stop before overshooting:
            # never push the low node into heavy load or below the high node.
            load_high_new -= load_domain
            load_low_new += load_domain
            if load_low_new > DPSNodeStatValues.Heavy_Load:
                break
            if load_high_new < load_low_new:
                break
            domain_list.append(domain_id)
        #print 'load_balance_domain: Exit domain_id %s\r'%domain_id
        if len(domain_list) == 0:
            raise Exception('No suitable domain found')
        print 'domain_list: %s\r'%domain_list
        return domain_list

    def show(self):
        '''
        Show: print a per-domain load report for this node to stdout.
        '''
        print '------------------------------------------------------------------\r'
        print 'Load on DPS Node %s\r'%self.location.show_ip()
        load_cpu = float(0)
        load_memory = float(0)
        for domain in self.domain_statistics.keys():
            try:
                nd = self.domain_statistics[domain]
            except Exception:
                continue
            load_cpu += nd.load_cpu()
            load_memory += nd.load_memory()
            nd.load_show()
        total_load = DPSNodeDomainStatistics.load_array(self.domain_statistics.values())
        print 'Load: CPU %.3f, Memory %.3f, Overall(Amortized) %.3f\r'%(load_cpu, load_memory, total_load)
        print '------------------------------------------------------------------\r'
        return
| epl-1.0 |
sebalix/OpenUpgrade | addons/base_gengo/ir_translation.py | 343 | 4344 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
# Map OpenERP locale codes to (Gengo language code, human readable name).
# Entries are kept in alphabetical order of the OpenERP code.
LANG_CODE_MAPPING = {
    'ar_SY': ('ar', 'Arabic'),
    'bg_BG': ('bg', 'Bulgarian'),
    'da_DK': ('da', 'Danish'),
    'de_DE': ('de', 'German'),
    'el_GR': ('el', 'Greek'),
    'en_GB': ('en-gb', 'English (British)'),
    'en_US': ('en', 'English'),
    'es_ES': ('es', 'Spanish (Spain)'),
    'fi_FI': ('fi', 'Finnish'),
    'fr_BE': ('fr', 'French'),
    'fr_CA': ('fr-ca', 'French (Canada)'),
    'fr_FR': ('fr', 'French'),
    'he_IL': ('he', 'Hebrew'),
    'hu_HU': ('hu', 'Hungarian'),
    'id_ID': ('id', 'Indonesian'),
    'it_IT': ('it', 'Italian'),
    'ja_JP': ('ja', 'Japanese'),
    'ko_KR': ('ko', 'Korean'),
    'nb_NO': ('no', 'Norwegian'),
    'nl_NL': ('nl', 'Dutch'),
    'pl_PL': ('pl', 'Polish'),
    'pt_BR': ('pt-br', 'Portuguese (Brazil)'),
    'pt_PT': ('pt', 'Portuguese (Europe)'),
    'ro_RO': ('ro', 'Romanian'),
    'ru_RU': ('ru', 'Russian'),
    'sv_SE': ('sv', 'Swedish'),
    'th_TH': ('th', 'Thai'),
    'tr_TR': ('tr', 'Turkish'),
    'vi_VN': ('vi', 'Vietnamese'),
    'zh_CN': ('zh', 'Chinese (Simplified)'),
    'zh_TW': ('zh-tw', 'Chinese (Traditional)'),
}
class ir_translation(osv.Model):
    """Extend ir.translation with Gengo ordering/metadata fields."""
    _name = "ir.translation"
    _inherit = "ir.translation"

    _columns = {
        # Free-form notes / activity trail for the Gengo job on this term.
        'gengo_comment': fields.text("Comments & Activity Linked to Gengo"),
        'order_id': fields.char('Gengo Order ID', size=32),
        "gengo_translation": fields.selection([('machine', 'Translation By Machine'),
                                               ('standard', 'Standard'),
                                               ('pro', 'Pro'),
                                               ('ultra', 'Ultra')], "Gengo Translation Service Level", help='You can select here the service level you want for an automatic translation using Gengo.'),
    }

    def _get_all_supported_languages(self, cr, uid, context=None):
        """Return {gengo_target_lang_code: [supported tiers]} from the Gengo
        API (language pairs with English as source). Raises except_osv when
        Gengo authentication fails."""
        flag, gengo = self.pool.get('base.gengo.translations').gengo_authentication(cr, uid, context=context)
        if not flag:
            raise osv.except_osv(_('Gengo Authentication Error'), gengo)
        supported_langs = {}
        lang_pair = gengo.getServiceLanguagePairs(lc_src='en')
        if lang_pair['opstat'] == 'ok':
            for g_lang in lang_pair['response']:
                if g_lang['lc_tgt'] not in supported_langs:
                    supported_langs[g_lang['lc_tgt']] = []
                supported_langs[g_lang['lc_tgt']] += [g_lang['tier']]
        return supported_langs

    def _get_gengo_corresponding_language(cr, lang):
        # Map an OpenERP locale code to its Gengo code; unknown codes pass
        # through unchanged.
        # NOTE(review): no 'self' parameter -- if invoked as a bound method,
        # 'cr' receives the instance and 'lang' the cursor. Looks like a
        # latent bug; confirm how callers invoke this before changing it.
        return lang in LANG_CODE_MAPPING and LANG_CODE_MAPPING[lang][0] or lang

    def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
        """Extend the base source query so that Gengo-translated terms take
        precedence, ranked pro > ultra > standard > machine > untranslated
        (CASE weights 40/30/20/10/0, sorted DESC)."""
        query, params = super(ir_translation, self)._get_source_query(cr, uid, name, types, lang, source, res_id)
        query += """
                    ORDER BY
                        CASE
                            WHEN gengo_translation=%s then 10
                            WHEN gengo_translation=%s then 20
                            WHEN gengo_translation=%s then 30
                            WHEN gengo_translation=%s then 40
                            ELSE 0
                        END DESC
                  """
        # Order of these parameters assigns the CASE weights above.
        params += ('machine', 'standard', 'ultra', 'pro',)
        return (query, params)
| agpl-3.0 |
olituks/sentinella | frontend/library/web2py/gluon/tests/test_web.py | 2 | 5867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for running web2py
"""
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import subprocess
import time
import signal
def fix_sys_path():
    """
    logic to have always the correct sys.path
    '', web2py/gluon, web2py/site-packages, web2py/ ...
    """

    def _prepend(entry):
        # Put *entry* at the front of sys.path, dropping any duplicate of it
        # (with or without a trailing slash).
        remainder = [p for p in sys.path if p != entry and p != entry + '/']
        sys.path = [entry] + remainder

    base = os.path.dirname(os.path.abspath(__file__))
    if not os.path.isfile(os.path.join(base, 'web2py.py')):
        # Walk up at most ten directory levels looking for the web2py root.
        for _level in range(10):
            if os.path.exists(os.path.join(base, 'web2py.py')):
                break
            base = os.path.abspath(os.path.join(base, '..'))

    # Prepend in this order so that '' ends up first on sys.path.
    for entry in (base,
                  os.path.abspath(os.path.join(base, 'site-packages')),
                  os.path.abspath(os.path.join(base, 'gluon')),
                  ''):
        _prepend(entry)
# Normalize sys.path BEFORE importing web2py-internal modules below --
# contrib.webclient is only importable once the web2py root is on the path.
fix_sys_path()

from contrib.webclient import WebClient
from urllib2 import HTTPError

# Handle to the spawned web2py server process; set by startwebserver() and
# used by stopwebserver().
webserverprocess = None
def startwebserver():
global webserverprocess
path = path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path,'web2py.py')):
i = 0
while i<10:
i += 1
if os.path.exists(os.path.join(path,'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
web2py_exec = os.path.join(path, 'web2py.py')
webserverprocess = subprocess.Popen([sys.executable, web2py_exec, '-a', 'testpass'])
print 'Sleeping before web2py starts...'
for a in range(1,11):
time.sleep(1)
print a, '...'
try:
c = WebClient('http://127.0.0.1:8000')
c.get('/')
break
except:
continue
print ''
def terminate_process(pid):
    """Forcibly kill the process identified by *pid*.

    Portable replacement for Popen.terminate(), which is unavailable on
    Python 2.5 (see http://stackoverflow.com/questions/1064335).
    """
    if not sys.platform.startswith('win'):
        # POSIX: SIGKILL cannot be caught or ignored by the target.
        os.kill(pid, signal.SIGKILL)
        return
    # Windows has no SIGKILL; go through the Win32 API instead.
    import ctypes
    PROCESS_TERMINATE = 1
    handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
    ctypes.windll.kernel32.TerminateProcess(handle, -1)
    ctypes.windll.kernel32.CloseHandle(handle)
def stopwebserver():
global webserverprocess
print 'Killing webserver'
if sys.version_info < (2,6):
terminate_process(webserverprocess.pid)
else:
webserverprocess.terminate()
class LiveTest(unittest.TestCase):
    """Base class for tests that need a live web2py server: one server is
    started for the whole TestCase class and killed afterwards."""

    @classmethod
    def setUpClass(cls):
        # Boot the shared web2py server once per test class.
        startwebserver()

    @classmethod
    def tearDownClass(cls):
        # Shut the shared server down after all tests in the class ran.
        stopwebserver()
@unittest.skipIf("datastore" in os.getenv("DB", ""), "TODO: setup web test for app engine")
class TestWeb(LiveTest):
    """End-to-end tests against the live web2py server on 127.0.0.1:8000."""

    def testRegisterAndLogin(self):
        # Register a user in the 'welcome' app, log out, log back in, and
        # verify the personalized greeting appears.
        client = WebClient('http://127.0.0.1:8000/welcome/default/')
        client.get('index')

        # register
        data = dict(first_name='Homer',
                    last_name='Simpson',
                    email='homer@web2py.com',
                    password='test',
                    password_two='test',
                    _formname='register')
        client.post('user/register', data=data)

        # logout
        client.get('user/logout')

        # login again
        data = dict(email='homer@web2py.com',
                    password='test',
                    _formname='login')
        client.post('user/login', data=data)
        self.assertTrue('Welcome Homer' in client.text)

        # check registration and login were successful
        client.get('index')
        # COMMENTED BECAUSE FAILS BUT WHY?
        self.assertTrue('Welcome Homer' in client.text)

        # NOTE(review): the server is started with admin password 'testpass'
        # (see startwebserver) but 'hello' is posted here -- confirm which
        # one is intended.
        client = WebClient('http://127.0.0.1:8000/admin/default/')
        client.post('index', data=dict(password='hello'))
        client.get('site')
        client.get('design/welcome')

    def testStaticCache(self):
        # Unversioned static files must not carry far-future cache headers;
        # versioned ones (/_x.y.z/ path component) must.
        s = WebClient('http://127.0.0.1:8000/welcome/')
        s.get('static/js/web2py.js')
        assert('expires' not in s.headers)
        assert(not s.headers['cache-control'].startswith('max-age'))
        text = s.text
        s.get('static/_1.2.3/js/web2py.js')
        assert(text == s.text)
        assert('expires' in s.headers)
        assert(s.headers['cache-control'].startswith('max-age'))

    def testSoap(self):
        # test soap server implementation
        from gluon.contrib.pysimplesoap.client import SoapClient, SoapFault
        url = 'http://127.0.0.1:8000/examples/soap_examples/call/soap?WSDL'
        client = SoapClient(wsdl=url)
        ret = client.SubIntegers(a=3, b=2)
        # check that the value returned is ok
        assert('SubResult' in ret)
        assert(ret['SubResult'] == 1)

        try:
            ret = client.Division(a=3, b=0)
        except SoapFault, sf:
            # verify the exception value is ok
            # assert(sf.faultstring == "float division by zero") # true only in 2.7
            assert(sf.faultcode == "Server.ZeroDivisionError")

        # store sent and received xml for low level test
        xml_request = client.xml_request
        xml_response = client.xml_response

        # do a low level raw soap request (using
        s = WebClient('http://127.0.0.1:8000/')
        try:
            s.post('examples/soap_examples/call/soap', data=xml_request, method="POST")
        except HTTPError, e:
            assert(e.msg=='INTERNAL SERVER ERROR')
        # check internal server error returned (issue 153)
        assert(s.status == 500)
        assert(s.text == xml_response)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| lgpl-2.1 |
QLGu/Django-facebook | docs/docs_env/Lib/encodings/ascii.py | 858 | 1248 | """ Python 'ascii' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ASCII codec delegating straight to the C-level helpers."""

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.ascii_encode
    decode = codecs.ascii_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental ASCII encoder built on the C-level codecs helper."""

    def encode(self, input, final=False):
        # ascii_encode returns (encoded_data, length_consumed); only the
        # encoded data is returned to the caller.
        encoded, _consumed = codecs.ascii_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental ASCII decoder built on the C-level codecs helper."""

    def decode(self, input, final=False):
        # ascii_decode returns (decoded_text, length_consumed); only the
        # decoded text is returned to the caller.
        decoded, _consumed = codecs.ascii_decode(input, self.errors)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: ASCII encoding inherited from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: ASCII decoding inherited from Codec."""
    pass
class StreamConverter(StreamWriter,StreamReader):
    # encode/decode are deliberately swapped relative to Codec: a converter
    # translates in the opposite direction of a plain reader/writer.
    encode = codecs.ascii_decode
    decode = codecs.ascii_encode
### encodings module API
def getregentry():
    """Return the CodecInfo entry registering this module as 'ascii'."""
    return codecs.CodecInfo(
        name='ascii',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| bsd-3-clause |
gusai-francelabs/datafari | windows/python/Lib/sre_constants.py | 185 | 7197 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017

try:
    from _sre import MAXREPEAT
except ImportError:
    # Older _sre builds do not export MAXREPEAT; define it (and patch the
    # module) with the historical 16-bit limit.
    import _sre
    MAXREPEAT = _sre.MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?

class error(Exception):
    """Exception raised for invalid regular expression patterns."""
    pass
# operators
# Each opcode/position/category is named by a lowercase string; its numeric
# value is its index in the OPCODES/ATCODES/CHCODES lists below (assigned
# via makedict()), which must stay in sync with sre_constants.h.

FAILURE = "failure"
SUCCESS = "success"

ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"

# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"

# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"

OPCODES = [

    # failure=0 success=1 (just because it looks better that way :-)
    FAILURE, SUCCESS,

    ANY, ANY_ALL,
    ASSERT, ASSERT_NOT,
    AT,
    BRANCH,
    CALL,
    CATEGORY,
    CHARSET, BIGCHARSET,
    GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
    IN, IN_IGNORE,
    INFO,
    JUMP,
    LITERAL, LITERAL_IGNORE,
    MARK,
    MAX_UNTIL,
    MIN_UNTIL,
    NOT_LITERAL, NOT_LITERAL_IGNORE,
    NEGATE,
    RANGE,
    REPEAT,
    REPEAT_ONE,
    SUBPATTERN,
    MIN_REPEAT_ONE

]

ATCODES = [
    AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
    AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
    AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
    AT_UNI_NON_BOUNDARY
]

CHCODES = [
    CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
    CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
    CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
    CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
    CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
    CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Return a dict mapping each item of *list* to its position (index).

    Used to turn the OPCODES/ATCODES/CHCODES name lists into name->code
    tables. The parameter keeps its historical name (shadowing the
    builtin ``list``) to remain call-compatible with existing code.
    """
    # enumerate() replaces the original hand-rolled counter loop.
    return dict((item, index) for index, item in enumerate(list))
# Rebind the name lists as name -> numeric code tables (the list index is
# the code). The rebinding from list to dict is intentional.
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)

# replacement operations for "ignore case" mode
OP_IGNORE = {
    GROUPREF: GROUPREF_IGNORE,
    IN: IN_IGNORE,
    LITERAL: LITERAL_IGNORE,
    NOT_LITERAL: NOT_LITERAL_IGNORE
}

AT_MULTILINE = {
    AT_BEGINNING: AT_BEGINNING_LINE,
    AT_END: AT_END_LINE
}

AT_LOCALE = {
    AT_BOUNDARY: AT_LOC_BOUNDARY,
    AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}

AT_UNICODE = {
    AT_BOUNDARY: AT_UNI_BOUNDARY,
    AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}

CH_LOCALE = {
    CATEGORY_DIGIT: CATEGORY_DIGIT,
    CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
    CATEGORY_SPACE: CATEGORY_SPACE,
    CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
    CATEGORY_WORD: CATEGORY_LOC_WORD,
    CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
    CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
    CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}

CH_UNICODE = {
    CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
    CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_SPACE: CATEGORY_UNI_SPACE,
    CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
    CATEGORY_WORD: CATEGORY_UNI_WORD,
    CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
    CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
    CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}

# flags (bitmask values; must match sre_constants.h)
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging

# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    # Regenerate sre_constants.h for the C engine (_sre.c) from the tables
    # defined above.
    def dump(f, d, prefix):
        # Emit one #define per table entry, ordered by numeric code value.
        items = d.items()
        items.sort(key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
    f = open("sre_constants.h", "w")
    f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py. If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */
""")
    f.write("#define SRE_MAGIC %d\n" % MAGIC)
    dump(f, OPCODES, "SRE_OP")
    dump(f, ATCODES, "SRE")
    dump(f, CHCODES, "SRE")
    f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
    f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
    f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
    f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
    f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
    f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
    f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
    f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
    f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
    f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    f.close()
    print "done"
| apache-2.0 |
nugget/home-assistant | tests/components/notify/test_homematic.py | 14 | 2656 | """The tests for the Homematic notification platform."""
import unittest
from homeassistant.setup import setup_component
import homeassistant.components.notify as notify_comp
from tests.common import assert_setup_component, get_test_home_assistant
class TestHomematicNotify(unittest.TestCase):
    """Test the Homematic notifications."""

    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down everything that was started."""
        self.hass.stop()

    def test_setup_full(self):
        """Test valid configuration."""
        # The homematic hub component must be set up before the notify
        # platform can bind to it.
        setup_component(self.hass, 'homematic', {
            'homematic': {
                'hosts': {
                    'ccu2': {
                        'host': '127.0.0.1'
                    }
                }
            }
        })
        with assert_setup_component(1) as handle_config:
            assert setup_component(self.hass, 'notify', {
                'notify': {
                    'name': 'test',
                    'platform': 'homematic',
                    'address': 'NEQXXXXXXX',
                    'channel': 2,
                    'param': 'SUBMIT',
                    'value': '1,1,108000,2',
                    'interface': 'my-interface'}
            })
        assert handle_config[notify_comp.DOMAIN]

    def test_setup_without_optional(self):
        """Test valid configuration without optional."""
        # Same as test_setup_full but omitting the optional 'interface' key.
        setup_component(self.hass, 'homematic', {
            'homematic': {
                'hosts': {
                    'ccu2': {
                        'host': '127.0.0.1'
                    }
                }
            }
        })
        with assert_setup_component(1) as handle_config:
            assert setup_component(self.hass, 'notify', {
                'notify': {
                    'name': 'test',
                    'platform': 'homematic',
                    'address': 'NEQXXXXXXX',
                    'channel': 2,
                    'param': 'SUBMIT',
                    'value': '1,1,108000,2'}
            })
        assert handle_config[notify_comp.DOMAIN]

    def test_bad_config(self):
        """Test invalid configuration."""
        # Missing required keys (address/channel/...): setup must reject it.
        config = {
            notify_comp.DOMAIN: {
                'name': 'test',
                'platform': 'homematic'
            }
        }
        with assert_setup_component(0) as handle_config:
            assert setup_component(self.hass, notify_comp.DOMAIN, config)
        assert not handle_config[notify_comp.DOMAIN]
| apache-2.0 |
quanvm009/codev7 | openerp/addons/base/res/res_users.py | 1 | 41573 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from functools import partial
import logging
from lxml import etree
from lxml.builder import E
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
import openerp.exceptions
from openerp.osv import fields,osv, expression
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class groups(osv.osv):
    """Access groups (res.groups): named sets of users carrying ACLs,
    record rules, menu and view access."""
    _name = "res.groups"
    _description = "Access Groups"
    _rec_name = 'full_name'

    def _get_full_name(self, cr, uid, ids, field, arg, context=None):
        """Function field getter: "Category / Group" display name, or just
        the group name when the group has no application category."""
        res = {}
        for g in self.browse(cr, uid, ids, context):
            if g.category_id:
                res[g.id] = '%s / %s' % (g.category_id.name, g.name)
            else:
                res[g.id] = g.name
        return res

    def _search_group(self, cr, uid, obj, name, args, context=None):
        """Search function for 'full_name': matches either the group name
        alone or a "Category / Group" composite; supports negative
        operators and list operands."""
        operand = args[0][2]
        operator = args[0][1]
        lst = True
        if isinstance(operand, bool):
            domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            # FIX: the original read
            #     operator in expression.NEGATIVE_TERM_OPERATORS == (not operand)
            # which Python chains as (op in NEG) and (NEG == (not operand)),
            # i.e. always False. Parenthesized to mirror the analogous
            # explicitly-parenthesized test further below.
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
                return expression.AND(domains)
            else:
                return expression.OR(domains)
        if isinstance(operand, basestring):
            lst = False
            operand = [operand]
        where = []
        for group in operand:
            # Split "Category / Group"; the last segment is the group name,
            # anything before it the category name.
            values = filter(bool, group.split('/'))
            group_name = values.pop().strip()
            category_name = values and '/'.join(values).strip() or group_name
            group_domain = [('name', operator, lst and [group_name] or group_name)]
            category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
            if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
                # Negative match on a bare name must also keep groups
                # without any category.
                category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
                sub_where = expression.AND([group_domain, category_domain])
            else:
                sub_where = expression.OR([group_domain, category_domain])
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                where = expression.AND([where, sub_where])
            else:
                where = expression.OR([where, sub_where])
        return where

    _columns = {
        'name': fields.char('Name', size=64, required=True, translate=True),
        'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
        'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls'),
        'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
            'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
        'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
        'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
        'comment' : fields.text('Comment', size=250, translate=True),
        'category_id': fields.many2one('ir.module.category', 'Application', select=True),
        'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
    }

    _sql_constraints = [
        ('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
    ]

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Standard search, with explicit Python-side ordering when sorting
        on the computed 'full_name' field (which SQL cannot order by)."""
        # add explicit ordering if search is sorted on full_name
        if order and order.startswith('full_name'):
            ids = super(groups, self).search(cr, uid, args, context=context)
            gs = self.browse(cr, uid, ids, context)
            gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
            gs = gs[offset:offset+limit] if limit else gs[offset:]
            # NOTE(review): the 'count' flag is ignored on this branch --
            # callers asking for a count with a full_name ordering get a
            # list of ids instead; confirm whether that combination occurs.
            return map(int, gs)
        return super(groups, self).search(cr, uid, args, offset, limit, order, context, count)

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a group, renaming the copy "<name> (copy)"."""
        group_name = self.read(cr, uid, [id], ['name'])[0]['name']
        # FIX: 'default' defaults to None and was dereferenced unchecked
        # (AttributeError on default.update). Also copy the caller's dict
        # instead of mutating it.
        default = dict(default or {})
        default.update({'name': _('%s (copy)')%group_name})
        return super(groups, self).copy(cr, uid, id, default, context)

    def write(self, cr, uid, ids, vals, context=None):
        """Standard write; group names may not start with '-' (reserved),
        and the model-access cache is flushed since group changes affect
        ACL evaluation immediately."""
        if 'name' in vals:
            if vals['name'].startswith('-'):
                raise osv.except_osv(_('Error'),
                        _('The name of the group can not start with "-"'))
        res = super(groups, self).write(cr, uid, ids, vals, context=context)
        self.pool.get('ir.model.access').call_cache_clearing_methods(cr)
        return res

groups()
class res_users(osv.osv):
    """User model (technical data only).

    A res.users record models an OpenERP login and is different from an
    employee. Partner-related data (lang, name, address, avatar, ...) is
    delegated to res.partner through ``_inherits``.
    """
    # cache of admin user ids, keyed by database name
    __admin_ids = {}
    # per-database {uid: password} cache, filled by check()
    _uid_cache = {}
    _inherits = {
        'res.partner': 'partner_id',
    }
    _name = "res.users"
    _description = 'Users'
    def _set_new_password(self, cr, uid, id, name, value, args, context=None):
        """Inverse of the ``new_password`` function field: store the password.

        ``value`` is False when the form submitted an empty field, which is
        ignored silently; users cannot change their own password this way.
        """
        if value is False:
            # Do not update the password if no value is provided, ignore silently.
            # For example web client submits False values for all empty fields.
            return
        if uid == id:
            # To change their own password users must use the client-specific change password wizard,
            # so that the new password is immediately used for further RPC requests, otherwise the user
            # will face unexpected 'Access Denied' exceptions.
            raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
        self.write(cr, uid, id, {'password': value})
    def _get_password(self, cr, uid, ids, arg, karg, context=None):
        # getter of the function field: never expose stored passwords
        return dict.fromkeys(ids, '')
_columns = {
'id': fields.integer('ID'),
'login_date': fields.date('Latest connection', select=1),
'partner_id': fields.many2one('res.partner', required=True,
string='Related Partner', ondelete='restrict',
help='Partner-related data of the user'),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True,
help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password, string='Set Password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.text('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at logon for this user, in addition to the standard menu."),
'menu_id': fields.many2one('ir.actions.actions', 'Menu Action', help="If specified, the action will replace the standard menu for this user."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help='The company this user is currently working for.', context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
# backward compatibility fields
'user_email': fields.related('email', type='char',
deprecated='Use the email field instead of user_email. This field will be removed with OpenERP 7.1.'),
}
    def on_change_company_id(self, cr, uid, ids, company_id):
        """Warn that switching company may invalidate the forms on screen."""
        return {'warning' : {
            'title': _("Company Switch Warning"),
            'message': _("Please keep in mind that documents currently displayed may not be relevant after switching to another company. If you have unsaved changes, please make sure to save and close all forms before switching to a different company. (You can click on Cancel in the User Preferences now)"),
        }
    }
    def onchange_state(self, cr, uid, ids, state_id, context=None):
        # delegate to res.partner, which owns the address fields
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
        return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
    def onchange_type(self, cr, uid, ids, is_company, context=None):
        """ Wrapper on the user.partner onchange_type, because some calls to the
            partner form view applied to the user may trigger the
            partner.onchange_type method, but applied to the user object.
        """
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
        return self.pool.get('res.partner').onchange_type(cr, uid, partner_ids, is_company, context=context)
    def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
        """ Wrapper on the user.partner onchange_address, because some calls to the
            partner form view applied to the user may trigger the
            partner.onchange_type method, but applied to the user object.
        """
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
        return self.pool.get('res.partner').onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context)
    def _check_company(self, cr, uid, ids, context=None):
        # the active company must be among the allowed companies (when any are set)
        return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
    _constraints = [
        (_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
    ]
    _sql_constraints = [
        ('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
    ]
    def _get_company(self,cr, uid, context=None, uid2=False):
        """Return the company id of user ``uid2`` (defaults to ``uid``), or False."""
        if not uid2:
            uid2 = uid
        user = self.pool.get('res.users').read(cr, uid, uid2, ['company_id'], context)
        # many2one reads come back as (id, name) tuples, or False
        company_id = user.get('company_id', False)
        return company_id and company_id[0] or False
    def _get_companies(self, cr, uid, context=None):
        # default for company_ids: just the current user's company, if any
        c = self._get_company(cr, uid, context)
        if c:
            return [c]
        return False
    def _get_menu(self,cr, uid, context=None):
        """Default menu action: base.action_menu_admin (if it is an act_window)."""
        dataobj = self.pool.get('ir.model.data')
        try:
            model, res_id = dataobj.get_object_reference(cr, uid, 'base', 'action_menu_admin')
            if model != 'ir.actions.act_window':
                return False
            return res_id
        except ValueError:
            # XML id not found: no default menu
            return False
    def _get_group(self,cr, uid, context=None):
        """Default groups for new users: base employee + partner manager."""
        dataobj = self.pool.get('ir.model.data')
        result = []
        try:
            dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
            result.append(group_id)
            dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
            result.append(group_id)
        except ValueError:
            # the expected base groups do not exist anymore
            pass
        return result
_defaults = {
'password': '',
'active': True,
'customer': False,
'menu_id': _get_menu,
'company_id': _get_company,
'company_ids': _get_companies,
'groups_id': _get_group,
'image': lambda self, cr, uid, ctx={}: self.pool.get('res.partner')._get_default_image(cr, uid, False, ctx, colorize=True),
}
# User can write on a few of his own fields (but not his groups for example)
SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
# User can read a few of his own fields
SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update']
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Read users; a user reading only SELF_READABLE_FIELDS about himself
        is escalated to superuser, and stored passwords are masked for
        callers without write access on res.users."""
        def override_password(o):
            # mask other users' passwords in the result
            if 'password' in o and ('id' not in o or o['id'] != uid):
                o['password'] = '********'
            return o
        if fields and (ids == [uid] or ids == uid):
            for key in fields:
                if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
                    break
            else:
                # safe fields only, so we read as super-user to bypass access rights
                uid = SUPERUSER_ID
        result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load)
        canwrite = self.pool.get('ir.model.access').check(cr, uid, 'res.users', 'write', False)
        if not canwrite:
            if isinstance(ids, (int, long)):
                result = override_password(result)
            else:
                result = map(override_password, result)
        return result
    def create(self, cr, uid, vals, context=None):
        """Create a user; a company-bound partner is realigned to the user's company."""
        user_id = super(res_users, self).create(cr, uid, vals, context=context)
        user = self.browse(cr, uid, user_id, context=context)
        if user.partner_id.company_id:
            user.partner_id.write({'company_id': user.company_id.id})
        return user_id
    def write(self, cr, uid, ids, values, context=None):
        """Write user values; users may update a whitelist of their own fields
        (SELF_WRITEABLE_FIELDS) without res.users write access. Clears the
        ACL, ir.rule, uid and context caches afterwards."""
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        if ids == [uid]:
            for key in values.keys():
                if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
                    break
            else:
                if 'company_id' in values:
                    # silently drop a company switch outside the allowed companies
                    if not (values['company_id'] in self.read(cr, SUPERUSER_ID, uid, ['company_ids'], context=context)['company_ids']):
                        del values['company_id']
                uid = 1 # safe fields only, so we write as super-user to bypass access rights
        res = super(res_users, self).write(cr, uid, ids, values, context=context)
        if 'company_id' in values:
            for user in self.browse(cr, uid, ids, context=context):
                # if partner is global we keep it that way
                if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
                    user.partner_id.write({'company_id': user.company_id.id})
        # clear caches linked to the users
        self.pool.get('ir.model.access').call_cache_clearing_methods(cr)
        clear = partial(self.pool.get('ir.rule').clear_cache, cr)
        map(clear, ids)  # Python 2: map() eagerly calls clear() for each id
        db = cr.dbname
        if db in self._uid_cache:
            for id in ids:
                if id in self._uid_cache[db]:
                    del self._uid_cache[db][id]
        self.context_get.clear_cache(self)
        return res
    def unlink(self, cr, uid, ids, context=None):
        """Delete users; the superuser (id 1) is protected, and cached
        credentials for the deleted ids are dropped."""
        if 1 in ids:
            raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by OpenERP (updates, module installation, ...)'))
        db = cr.dbname
        if db in self._uid_cache:
            for id in ids:
                if id in self._uid_cache[db]:
                    del self._uid_cache[db][id]
        return super(res_users, self).unlink(cr, uid, ids, context=context)
    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        """Name search: try an exact login match first, then fall back to the
        (partner) name with the requested operator."""
        if not args:
            args=[]
        if not context:
            context={}
        ids = []
        if name and operator in ['=', 'ilike']:
            ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context)
        if not ids:
            ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context)
        return self.name_get(cr, user, ids, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a user; rename name/login to keep the login unique."""
        user2copy = self.read(cr, uid, [id], ['login','name'])[0]
        default = dict(default or {})
        if ('name' not in default) and ('partner_id' not in default):
            default['name'] = _("%s (copy)") % user2copy['name']
        if 'login' not in default:
            default['login'] = _("%s (copy)") % user2copy['login']
        return super(res_users, self).copy(cr, uid, id, default, context)
    def copy_data(self, cr, uid, ids, default=None, context=None):
        # never copy the last-login timestamp along with the record
        if default is None:
            default = {}
        default.update({'login_date': False})
        return super(res_users, self).copy_data(cr, uid, ids, default, context=context)
    @tools.ormcache(skiparg=2)
    def context_get(self, cr, uid, context=None):
        """Build the user's session context from 'lang', 'tz' and every
        column named 'context_*' (cached per uid via ormcache)."""
        user = self.browse(cr, SUPERUSER_ID, uid, context)
        result = {}
        for k in self._all_columns.keys():
            if k.startswith('context_'):
                # strip the 'context_' prefix (8 chars) to get the context key
                context_key = k[8:]
            elif k in ['lang', 'tz']:
                context_key = k
            else:
                context_key = False
            if context_key:
                res = getattr(user,k) or False
                if isinstance(res, browse_record):
                    # store ids in the context, not browse records
                    res = res.id
                result[context_key] = res or False
        return result
    def action_get(self, cr, uid, context=None):
        # resolve the "my preferences" action (base.action_res_users_my)
        dataobj = self.pool.get('ir.model.data')
        data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my')
        return dataobj.browse(cr, uid, data_id, context=context).res_id
    def check_super(self, passwd):
        """Validate the master (admin) password from the server config."""
        if passwd == tools.config['admin_passwd']:
            return True
        else:
            raise openerp.exceptions.AccessDenied()
    def check_credentials(self, cr, uid, password):
        """ Override this method to plug additional authentication methods"""
        res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)])
        if not res:
            raise openerp.exceptions.AccessDenied()
    def login(self, db, login, password):
        """Return the uid for (login, password) on ``db``, or False.

        Empty passwords are rejected outright; on success the user's
        login_date is updated best-effort (FOR UPDATE NOWAIT, failures only
        logged).
        """
        if not password:
            return False
        user_id = False
        cr = pooler.get_db(db).cursor()
        try:
            # autocommit: our single update request will be performed atomically.
            # (In this way, there is no opportunity to have two transactions
            # interleaving their cr.execute()..cr.commit() calls and have one
            # of them rolled back due to a concurrent access.)
            cr.autocommit(True)
            # check if user exists
            res = self.search(cr, SUPERUSER_ID, [('login','=',login)])
            if res:
                user_id = res[0]
                # check credentials
                self.check_credentials(cr, user_id, password)
                # We effectively unconditionally write the res_users line.
                # Even w/ autocommit there's a chance the user row will be locked,
                # in which case we can't delay the login just for the purpose of
                # update the last login date - hence we use FOR UPDATE NOWAIT to
                # try to get the lock - fail-fast
                # Failing to acquire the lock on the res_users row probably means
                # another request is holding it. No big deal, we don't want to
                # prevent/delay login in that case. It will also have been logged
                # as a SQL error, if anyone cares.
                try:
                    # NO KEY introduced in PostgreSQL 9.3 http://www.postgresql.org/docs/9.3/static/release-9-3.html#AEN115299
                    update_clause = 'NO KEY UPDATE' if cr._cnx.server_version >= 90300 else 'UPDATE'
                    cr.execute("SELECT id FROM res_users WHERE id=%%s FOR %s NOWAIT" % update_clause, (user_id,), log_exceptions=False)
                    cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,))
                except Exception:
                    _logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True)
        except openerp.exceptions.AccessDenied:
            _logger.info("Login failed for db:%s login:%s", db, login)
            user_id = False
        finally:
            cr.close()
        return user_id
    def authenticate(self, db, login, password, user_agent_env):
        """Return the uid for the given ``login``/``password`` on ``db``,
        or False if there is no matching user.

        :param str db: the database on which user is trying to authenticate
        :param str login: username
        :param str password: user password
        :param dict user_agent_env: environment dictionary describing any
            relevant environment attributes; on an admin login its
            'base_location' may seed the 'web.base.url' parameter.
        """
        uid = self.login(db, login, password)
        if uid == openerp.SUPERUSER_ID:
            # Successfully logged in as admin!
            # Attempt to guess the web base url...
            if user_agent_env and user_agent_env.get('base_location'):
                cr = pooler.get_db(db).cursor()
                try:
                    base = user_agent_env['base_location']
                    ICP = self.pool.get('ir.config_parameter')
                    if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
                        ICP.set_param(cr, uid, 'web.base.url', base)
                        cr.commit()
                except Exception:
                    _logger.exception("Failed to update web.base.url configuration parameter")
                finally:
                    cr.close()
        return uid
    def check(self, db, uid, passwd):
        """Verifies that the given (uid, password) is authorized for the database ``db`` and
           raise an exception if it is not. Successful checks are cached per database."""
        if not passwd:
            # empty passwords disallowed for obvious security reasons
            raise openerp.exceptions.AccessDenied()
        if self._uid_cache.get(db, {}).get(uid) == passwd:
            return
        cr = pooler.get_db(db).cursor()
        try:
            self.check_credentials(cr, uid, passwd)
            # NOTE(review): dict.has_key is Python-2-only; the rest of this
            # class uses `db in self._uid_cache` — consider aligning.
            if self._uid_cache.has_key(db):
                self._uid_cache[db][uid] = passwd
            else:
                self._uid_cache[db] = {uid:passwd}
        finally:
            cr.close()
    def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
        """Change current user password. Old password must be provided explicitly
        to prevent hijacking an existing user session, or for cases where the cleartext
        password is not used to authenticate requests.

        :return: True
        :raise: openerp.exceptions.AccessDenied when old password is wrong
        :raise: except_osv when new password is not set or empty
        """
        self.check(cr.dbname, uid, old_passwd)
        if new_passwd:
            return self.write(cr, uid, uid, {'password': new_passwd})
        raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!"))
    def preference_save(self, cr, uid, ids, context=None):
        # client action: reload the UI so the new preferences take effect
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
    def preference_change_password(self, cr, uid, ids, context=None):
        # client action: open the change-password wizard in a dialog
        return {
            'type': 'ir.actions.client',
            'tag': 'change_password',
            'target': 'new',
        }
    def has_group(self, cr, uid, group_ext_id):
        """Checks whether user belongs to given group.

        :param str group_ext_id: external ID (XML ID) of the group.
           Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use..
        :return: True if the current user is a member of the group with the
           given external ID (XML ID), else False.
        """
        assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified"
        module, ext_id = group_ext_id.split('.')
        cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
                        (SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
                   (uid, module, ext_id))
        return bool(cr.fetchone())
#
# Extension of res.groups and res.users with a relation for "implied" or
# "inherited" groups. Once a user belongs to a group, it automatically belongs
# to the implied groups (transitively).
#
class cset(object):
    """A constrained set: a set of elements that may be declared a subset of
    other csets. Anything added to a cset is propagated to all registered
    supersets; cycles in the subset constraints are handled gracefully.
    """

    def __init__(self, xs):
        self.supersets = set()
        self.elements = set(xs)

    def subsetof(self, other):
        """Constrain self to be a subset of *other* (no-op on itself)."""
        if other is self:
            return
        self.supersets.add(other)
        other.update(self.elements)

    def update(self, xs):
        """Add *xs* to this set and propagate the genuinely new elements."""
        fresh = set(xs) - self.elements
        if not fresh:
            # nothing new: this is also what terminates propagation cycles
            return
        self.elements.update(fresh)
        for parent in self.supersets:
            parent.update(fresh)

    def __iter__(self):
        return iter(self.elements)
def concat(ls):
    """Return a single list with the elements of all iterables in *ls*."""
    return [item for chunk in ls for item in chunk]
class groups_implied(osv.osv):
    """Extend res.groups with an 'implied_ids' relation: members of a group
    automatically belong to all (transitively) implied groups."""
    _inherit = 'res.groups'
    def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
        "computes the transitive closure of relation implied_ids"
        memo = {} # use a memo for performance and cycle avoidance
        def computed_set(g):
            # cset of all groups implied (transitively) by group g
            if g not in memo:
                memo[g] = cset(g.implied_ids)
                for h in g.implied_ids:
                    computed_set(h).subsetof(memo[g])
            return memo[g]
        res = {}
        for g in self.browse(cr, SUPERUSER_ID, ids, context):
            res[g.id] = map(int, computed_set(g))
        return res
    _columns = {
        'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
            string='Inherits', help='Users of this group automatically inherit those groups'),
        'trans_implied_ids': fields.function(_get_trans_implied,
            type='many2many', relation='res.groups', string='Transitively inherits'),
    }
    def create(self, cr, uid, values, context=None):
        """Create a group; user assignment goes through write() so that
        implied groups are granted as well."""
        users = values.pop('users', None)
        gid = super(groups_implied, self).create(cr, uid, values, context)
        if users:
            # delegate addition of users to add implied groups
            self.write(cr, uid, [gid], {'users': users}, context)
        return gid
    def write(self, cr, uid, ids, values, context=None):
        """Write groups; when users or implications change, propagate the
        groups' users to every transitively implied group."""
        res = super(groups_implied, self).write(cr, uid, ids, values, context)
        if values.get('users') or values.get('implied_ids'):
            # add all implied groups (to all users of each group)
            for g in self.browse(cr, uid, ids):
                gids = map(int, g.trans_implied_ids)
                vals = {'users': [(4, u.id) for u in g.users]}
                super(groups_implied, self).write(cr, uid, gids, vals, context)
        return res
class users_implied(osv.osv):
    """Extend res.users so that assigning a group also grants every group it
    transitively implies."""
    _inherit = 'res.users'
    def create(self, cr, uid, values, context=None):
        """Create a user; group assignment is routed through write() so the
        implied groups get added too."""
        groups = values.pop('groups_id', None)
        user_id = super(users_implied, self).create(cr, uid, values, context)
        if groups:
            # delegate addition of groups to add implied groups
            self.write(cr, uid, [user_id], {'groups_id': groups}, context)
        return user_id
    def write(self, cr, uid, ids, values, context=None):
        """Write users; whenever groups_id changes, add the transitive
        closure of implied groups for each affected user."""
        if not isinstance(ids,list):
            ids = [ids]
        res = super(users_implied, self).write(cr, uid, ids, values, context)
        if values.get('groups_id'):
            # add implied groups for all users
            for user in self.browse(cr, uid, ids):
                gs = set(concat([g.trans_implied_ids for g in user.groups_id]))
                vals = {'groups_id': [(4, g.id) for g in gs]}
                super(users_implied, self).write(cr, uid, [user.id], vals, context)
        return res
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically defines a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - boolean field 'in_groups_ID1_..._IDk' is True iff
# any of ID1, ..., IDk is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
# Helpers around the reified-group field naming scheme (see comment above).
def name_boolean_group(id):
    """Field name for the boolean widget of one group id."""
    return 'in_group_%s' % id

def name_boolean_groups(ids):
    """Field name for 'member of any of *ids*'."""
    return 'in_groups_' + '_'.join(str(g) for g in ids)

def name_selection_groups(ids):
    """Field name for the selection widget over the group set *ids*."""
    return 'sel_groups_' + '_'.join(str(g) for g in ids)

def is_boolean_group(name):
    return name[:9] == 'in_group_'

def is_boolean_groups(name):
    return name[:10] == 'in_groups_'

def is_selection_groups(name):
    return name[:11] == 'sel_groups_'

def is_reified_group(name):
    """True for any of the three reified group field name shapes."""
    return (is_boolean_group(name)
            or is_boolean_groups(name)
            or is_selection_groups(name))

def get_boolean_group(name):
    """Group id encoded in an 'in_group_ID' field name."""
    return int(name[9:])

def get_boolean_groups(name):
    """Group ids encoded in an 'in_groups_ID1_..._IDk' field name."""
    return [int(part) for part in name[10:].split('_')]

def get_selection_groups(name):
    """Group ids encoded in a 'sel_groups_ID1_..._IDk' field name."""
    return [int(part) for part in name[11:].split('_')]
def partition(f, xs):
    "return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
    # single pass so generators/iterators work too; order is preserved
    hits, misses = [], []
    for item in xs:
        if f(item):
            hits.append(item)
        else:
            misses.append(item)
    return hits, misses
class groups_view(osv.osv):
    """Extend res.groups to (re)generate the reified-groups user form view
    (base.user_groups_view) whenever groups change."""
    _inherit = 'res.groups'
    def create(self, cr, uid, values, context=None):
        # any group change invalidates the generated user form view
        res = super(groups_view, self).create(cr, uid, values, context)
        self.update_user_groups_view(cr, uid, context)
        return res
    def write(self, cr, uid, ids, values, context=None):
        res = super(groups_view, self).write(cr, uid, ids, values, context)
        self.update_user_groups_view(cr, uid, context)
        return res
    def unlink(self, cr, uid, ids, context=None):
        res = super(groups_view, self).unlink(cr, uid, ids, context)
        self.update_user_groups_view(cr, uid, context)
        return res
    def update_user_groups_view(self, cr, uid, context=None):
        """Rebuild the arch of base.user_groups_view: one selection field per
        linearizable application, boolean fields for the rest."""
        # the view with id 'base.user_groups_view' inherits the user form view,
        # and introduces the reified group fields
        if not context or context.get('install_mode'):
            # use installation/admin language for translatable names in the view
            context = dict(context or {})
            context.update(self.pool['res.users'].context_get(cr, uid))
        view = self.get_user_groups_view(cr, uid, context)
        if view:
            xml1, xml2 = [], []
            xml1.append(E.separator(string=_('Application'), colspan="4"))
            for app, kind, gs in self.get_groups_by_application(cr, uid, context):
                # hide groups in category 'Hidden' (except to group_no_one)
                attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 'base.module_category_hidden' else {}
                if kind == 'selection':
                    # application name with a selection field
                    field_name = name_selection_groups(map(int, gs))
                    xml1.append(E.field(name=field_name, **attrs))
                    xml1.append(E.newline())
                else:
                    # application separator with boolean fields
                    app_name = app and app.name or _('Other')
                    xml2.append(E.separator(string=app_name, colspan="4", **attrs))
                    for g in gs:
                        field_name = name_boolean_group(g.id)
                        xml2.append(E.field(name=field_name, **attrs))
            xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
            xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
            xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
            view.write({'arch': xml_content})
        return True
    def get_user_groups_view(self, cr, uid, context=None):
        # return the ir.ui.view record, or False if it no longer exists
        try:
            view = self.pool.get('ir.model.data').get_object(cr, SUPERUSER_ID, 'base', 'user_groups_view', context)
            assert view and view._table_name == 'ir.ui.view'
        except Exception:
            view = False
        return view
    def get_application_groups(self, cr, uid, domain=None, context=None):
        # hook: which groups take part in the generated view
        return self.search(cr, uid, domain or [])
    def get_groups_by_application(self, cr, uid, context=None):
        """Return all groups classified by application (module category) as a
        list of ``(app, kind, [group, ...])`` tuples, where app and group are
        browse records and kind is 'selection' (the application's groups form
        a total implication chain, given in reverse implication order) or
        'boolean'. Applications are given in sequence order.
        """
        def linearized(gs):
            # return gs sorted by implication order, or None if not totally ordered
            gs = set(gs)
            # determine sequence order: a group should appear after its implied groups
            order = dict.fromkeys(gs, 0)
            for g in gs:
                for h in gs.intersection(g.trans_implied_ids):
                    order[h] -= 1
            # check whether order is total, i.e., sequence orders are distinct
            if len(set(order.itervalues())) == len(gs):
                return sorted(gs, key=lambda g: order[g])
            return None
        # classify all groups by application
        gids = self.get_application_groups(cr, uid, context=context)
        by_app, others = {}, []
        for g in self.browse(cr, uid, gids, context):
            if g.category_id:
                by_app.setdefault(g.category_id, []).append(g)
            else:
                others.append(g)
        # build the result
        res = []
        apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
        for app in apps:
            gs = linearized(by_app[app])
            if gs:
                res.append((app, 'selection', gs))
            else:
                res.append((app, 'boolean', by_app[app]))
        if others:
            res.append((False, 'boolean', others))
        return res
class users_view(osv.osv):
    """Extend res.users to translate between 'groups_id' and the reified
    group fields (in_group_*, in_groups_*, sel_groups_*) used by the
    generated user form view."""
    _inherit = 'res.users'
    def create(self, cr, uid, values, context=None):
        # fold reified group fields back into values['groups_id']
        self._set_reified_groups(values)
        return super(users_view, self).create(cr, uid, values, context)
    def write(self, cr, uid, ids, values, context=None):
        self._set_reified_groups(values)
        return super(users_view, self).write(cr, uid, ids, values, context)
    def _set_reified_groups(self, values):
        """ reflect reified group fields in values['groups_id'] """
        if 'groups_id' in values:
            # groups are already given, ignore group fields
            for f in filter(is_reified_group, values.iterkeys()):
                del values[f]
            return
        add, remove = [], []
        for f in values.keys():
            if is_boolean_group(f):
                target = add if values.pop(f) else remove
                target.append(get_boolean_group(f))
            elif is_boolean_groups(f):
                if not values.pop(f):
                    remove.extend(get_boolean_groups(f))
            elif is_selection_groups(f):
                # deselect the whole chain, then re-add the chosen group (if any)
                remove.extend(get_selection_groups(f))
                selected = values.pop(f)
                if selected:
                    add.append(selected)
        # update values *only* if groups are being modified, otherwise
        # we introduce spurious changes that might break the super.write() call.
        if add or remove:
            # remove groups in 'remove' and add groups in 'add'
            values['groups_id'] = [(3, id) for id in remove] + [(4, id) for id in add]
    def default_get(self, cr, uid, fields, context=None):
        # compute defaults for reified group fields from groups_id
        group_fields, fields = partition(is_reified_group, fields)
        fields1 = (fields + ['groups_id']) if group_fields else fields
        values = super(users_view, self).default_get(cr, uid, fields1, context)
        self._get_reified_groups(group_fields, values)
        return values
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        # always fetch groups_id so reified fields can be derived from it
        if not fields:
            fields = self.fields_get(cr, uid, context=context).keys()
        group_fields, fields = partition(is_reified_group, fields)
        if not 'groups_id' in fields:
            fields.append('groups_id')
        res = super(users_view, self).read(cr, uid, ids, fields, context=context, load=load)
        if res:
            for values in (res if isinstance(res, list) else [res]):
                self._get_reified_groups(group_fields, values)
        return res
    def _get_reified_groups(self, fields, values):
        """ compute the given reified group fields from values['groups_id'] """
        gids = set(values.get('groups_id') or [])
        for f in fields:
            if is_boolean_group(f):
                values[f] = get_boolean_group(f) in gids
            elif is_boolean_groups(f):
                values[f] = not gids.isdisjoint(get_boolean_groups(f))
            elif is_selection_groups(f):
                selected = [gid for gid in get_selection_groups(f) if gid in gids]
                # the last (maximal in implication order) selected group wins
                values[f] = selected and selected[-1] or False
    def fields_get(self, cr, uid, allfields=None, context=None, write_access=True):
        """Add the reified group fields (admins only) to fields_get()."""
        res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access)
        # add reified groups fields
        if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            return res  # only administrators get the reified group fields
        for app, kind, gs in self.pool.get('res.groups').get_groups_by_application(cr, uid, context):
            if kind == 'selection':
                # selection group field
                tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
                res[name_selection_groups(map(int, gs))] = {
                    'type': 'selection',
                    'string': app and app.name or _('Other'),
                    'selection': [(False, '')] + [(g.id, g.name) for g in gs],
                    'help': '\n'.join(tips),
                    'exportable': False,
                    'selectable': False,
                }
            else:
                # boolean group fields
                for g in gs:
                    res[name_boolean_group(g.id)] = {
                        'type': 'boolean',
                        'string': g.name,
                        'help': g.comment,
                        'exportable': False,
                        'selectable': False,
                    }
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
matslindh/codingchallenges | adventofcode2016/14.py | 1 | 1448 | from functools import lru_cache
import hashlib
salt = 'yjdafjpo'  # puzzle input (AoC 2016 day 14)
#salt = 'abc'
hashes = {}  # NOTE(review): unused — memoization is handled by lru_cache below
@lru_cache(None)
def first_with_three(s):
    """Return the first character of *s* that appears three times in a row,
    or None if there is no such triple."""
    for i in range(len(s) - 2):
        if s[i] == s[i + 1] == s[i + 2]:
            return s[i]
    return None
@lru_cache(None)
def seq_count(s):
    """Map each character of *s* to the length of its longest consecutive run.

    Quirk preserved from the original: an empty string yields {None: 1}.
    """
    runs = {}
    last = None
    run = 1
    for ch in s:
        if last is None:
            # first character: its run length starts at 1
            last = ch
        elif ch == last:
            run += 1
        else:
            if runs.get(last, 0) < run:
                runs[last] = run
            last = ch
            run = 1
    # flush the final run (or the {None: 1} entry for an empty input)
    if runs.get(last, 0) < run:
        runs[last] = run
    return runs
@lru_cache(None)
def hash(s, i):
    """Return the part-2 "stretched" MD5 hex digest for seed ``s + str(i)``:
    one initial MD5 plus 2016 re-hashings (2017 rounds total).

    NOTE: shadows the builtin ``hash`` — kept so callers below keep working.
    The part-1 variant would simply return a single MD5 round of the seed.
    """
    digest = s + str(i)
    rounds = 2017
    while rounds:
        digest = hashlib.md5(digest.encode('ascii')).hexdigest()
        rounds -= 1
    return digest
# Scan indices until enough one-time-pad keys are found (AoC 2016 day 14).
idx = 0
keys = 0
found = False
while not found:
    h = hash(salt, idx)
    t = first_with_three(h)
    if t:
        # a triple of t makes idx a key iff some hash within the next 1000
        # indices contains five t's in a row
        for x in range(idx+1, idx+1001):
            h_2 = hash(salt, x)
            cnts = seq_count(h_2)
            if t in cnts and cnts[t] > 4:
                keys += 1
                print("Found key " + str(keys) + " at index " + str(idx))
                # NOTE(review): the puzzle asks for the 64th key; 72 looks
                # like a leftover tweak — confirm. 'found_key' is never read.
                if keys == 72:
                    found = True
                    found_key = True
                break
idx += 1 | mit |
jpopelka/fabric8-analytics-worker | alembic/versions/22a1cd66a9c6_batches_reviews.py | 3 | 1638 | """Two new tables: batch and review.
Revision ID: 22a1cd66a9c6
Revises: 963d3d929b19
Create Date: 2016-05-10 06:50:04.158499
"""
# revision identifiers, used by Alembic.
revision = '22a1cd66a9c6'
down_revision = '963d3d929b19'
branch_labels = None  # no named branch for this revision
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Upgrade the database to a newer revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 'batch': started_at timestamp plus a JSONB payload of EPVs, keyed by token
    op.create_table('batch',
                    sa.Column('token', sa.Integer(), nullable=False),
                    sa.Column('started_at', sa.DateTime(), nullable=True),
                    sa.Column('epvs', postgresql.JSONB(), nullable=True),
                    sa.PrimaryKeyConstraint('token'))
    # 'review': a user's approval/comment on an EPV string
    op.create_table('review',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('approved', sa.Boolean(), nullable=True),
                    sa.Column('user', sa.Integer(), nullable=True),
                    sa.Column('timestamp', sa.DateTime(), nullable=True),
                    sa.Column('comment', sa.Text(), nullable=True),
                    sa.Column('epv', sa.String(length=255), nullable=True),
                    sa.ForeignKeyConstraint(['user'], ['user.id'], ),
                    sa.PrimaryKeyConstraint('id'))
    # ### end Alembic commands ###
def downgrade():
    """Downgrade the database to an older revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # drop in reverse creation order
    op.drop_table('review')
    op.drop_table('batch')
    # ### end Alembic commands ###
| gpl-3.0 |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tools/tests/test_merge.py | 7 | 56114 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
import pandas as pd
from pandas.compat import lrange, lzip
from pandas.tools.merge import merge, concat, MergeError
from pandas.util.testing import (assert_frame_equal,
assert_series_equal,
slow)
from pandas import DataFrame, Index, MultiIndex, Series, Categorical
import pandas.util.testing as tm
N = 50
NGROUPS = 8


def get_test_data(ngroups=NGROUPS, n=N):
    """Return a shuffled length-``n`` ndarray of group labels.

    Labels ``0..ngroups-1`` are tiled as evenly as possible over ``n``
    slots; any remainder is filled from the start of the label sequence,
    so the result always has exactly ``n`` entries.

    Parameters
    ----------
    ngroups : int, default NGROUPS
        Number of distinct group labels.
    n : int, default N
        Total number of labels to produce.
    """
    # builtin range instead of the deprecated pandas.compat.lrange shim
    unique_groups = list(range(ngroups))
    arr = np.asarray(np.tile(unique_groups, n // ngroups))

    if len(arr) < n:
        # pad with the first few labels to reach exactly n entries
        arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])

    random.shuffle(arr)
    return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
    """Build shared fixtures: two multikey frames and a column/index pair."""
    n_fifth = N // 5
    # frames keyed on two grouping columns, for multi-key merges
    self.df = DataFrame({'key1': get_test_data(),
                         'key2': get_test_data(),
                         'data1': np.random.randn(N),
                         'data2': np.random.randn(N)})
    # drop the low key2 values so the two frames only partially overlap
    self.df = self.df[self.df['key2'] > 1]
    self.df2 = DataFrame({'key1': get_test_data(n=n_fifth),
                          'key2': get_test_data(ngroups=NGROUPS // 2,
                                                n=n_fifth),
                          'value': np.random.randn(n_fifth)})
    # small column-on-index pair for single-key join tests
    self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                           'v1': np.random.randn(7)})
    self.right = DataFrame({'v2': np.random.randn(4)},
                           index=['d', 'b', 'c', 'a'])
def test_merge_common(self):
    """merge() with no key spec should infer the shared columns key1/key2."""
    inferred = merge(self.df, self.df2)
    explicit = merge(self.df, self.df2, on=['key1', 'key2'])
    tm.assert_frame_equal(inferred, explicit)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=False)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=False)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=True)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=True)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
# inner join
result = merge(left, right, left_on='key', right_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected)
result = merge(right, left, right_on='key', left_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
    """Inconsistent key specifications must raise ValueError."""
    bad_calls = [
        # index flag on only one side, with no key for the other
        (self.left, self.right, dict(left_index=True)),
        (self.left, self.right, dict(right_index=True)),
        # `on` mixed with `left_on`
        (self.left, self.left, dict(left_on='key', on='key')),
        # key lists of different lengths
        (self.df, self.df2, dict(left_on=['key1'],
                                 right_on=['key1', 'key2'])),
    ]
    for lhs, rhs, kwargs in bad_calls:
        self.assertRaises(ValueError, merge, lhs, rhs, **kwargs)
def test_index_and_on_parameters_confusion(self):
self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
left_index=False, right_index=['key1', 'key2'])
self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
left_index=['key1', 'key2'], right_index=False)
self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
left_index=['key1', 'key2'],
right_index=['key1', 'key2'])
def test_merge_overlap(self):
    """Self-merge suffixes overlapping non-key columns with _x/_y."""
    merged = merge(self.left, self.left, on='key')
    # each key value matches itself `count` times -> rows = sum(count**2)
    expected_rows = (self.left['key'].value_counts() ** 2).sum()
    self.assertEqual(len(merged), expected_rows)
    self.assertIn('v1_x', merged)
    self.assertIn('v1_y', merged)
def test_merge_different_column_key_names(self):
left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
'value': [5, 6, 7, 8]})
merged = left.merge(right, left_on='lkey', right_on='rkey',
how='outer', sort=True)
exp = pd.Series(['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan],
name='lkey')
tm.assert_series_equal(merged['lkey'], exp)
exp = pd.Series(['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'],
name='rkey')
tm.assert_series_equal(merged['rkey'], exp)
exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name='value_x')
tm.assert_series_equal(merged['value_x'], exp)
exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name='value_y')
tm.assert_series_equal(merged['value_y'], exp)
def test_merge_copy(self):
    """copy=True must decouple the merge result from the input frames."""
    lhs = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
    rhs = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))

    out = merge(lhs, rhs, left_index=True,
                right_index=True, copy=True)

    # mutating the merged frame must leave the originals untouched
    out['a'] = 6
    self.assertTrue((lhs['a'] == 0).all())

    out['d'] = 'peekaboo'
    self.assertTrue((rhs['d'] == 'bar').all())
def test_merge_nocopy(self):
    """copy=False shares data: edits to the result propagate to the inputs."""
    lhs = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
    rhs = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))

    out = merge(lhs, rhs, left_index=True,
                right_index=True, copy=False)

    # with shared blocks the originals see the mutation
    out['a'] = 6
    self.assertTrue((lhs['a'] == 6).all())

    out['d'] = 'peekaboo'
    self.assertTrue((rhs['d'] == 'peekaboo').all())
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
'rvalue': lrange(6)})
joined = merge(left, right, on='key', how='outer')
expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5],
'value': np.array([0, 0, 1, 1, 2, 3, 4,
np.nan, np.nan]),
'rvalue': [0, 1, 0, 1, 2, 2, 3, 4, 5]},
columns=['value', 'key', 'rvalue'])
assert_frame_equal(joined, expected)
def test_merge_join_key_dtype_cast(self):
# #8596
df1 = DataFrame({'key': [1], 'v1': [10]})
df2 = DataFrame({'key': [2], 'v1': [20]})
df = merge(df1, df2, how='outer')
self.assertEqual(df['key'].dtype, 'int64')
df1 = DataFrame({'key': [True], 'v1': [1]})
df2 = DataFrame({'key': [False], 'v1': [0]})
df = merge(df1, df2, how='outer')
# GH13169
# this really should be bool
self.assertEqual(df['key'].dtype, 'object')
df1 = DataFrame({'val': [1]})
df2 = DataFrame({'val': [2]})
lkey = np.array([1])
rkey = np.array([2])
df = merge(df1, df2, left_on=lkey, right_on=rkey, how='outer')
self.assertEqual(df['key_0'].dtype, 'int64')
def test_handle_join_key_pass_array(self):
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'rvalue': lrange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on='key', right_on=key, how='outer')
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
self.assertTrue(merged['key'].notnull().all())
self.assertTrue(merged2['key'].notnull().all())
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
self.assert_series_equal(merged['key_0'],
Series([1, 1, 1, 1, 2, 2, 3, 4, 5],
name='key_0'))
left = DataFrame({'value': lrange(3)})
right = DataFrame({'rvalue': lrange(6)})
key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
merged = merge(left, right, left_index=True, right_on=key, how='outer')
self.assert_series_equal(merged['key_0'], Series(key, name='key_0'))
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
index=[dt3, dt3, dt2, dt2, dt, dt])
_check_merge(df1, df2)
df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
index=[dt2, dt2, dt, dt])
df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
index=[dt2, dt2, dt3, dt, dt])
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
    """Merging with an empty frame yields the populated side unchanged."""
    populated = DataFrame({'key': [1], 'value': [2]})
    empty = DataFrame({'key': []})

    result = merge(populated, empty, on='key', how='left')
    assert_frame_equal(result, populated)

    result = merge(empty, populated, on='key', how='right')
    assert_frame_equal(result, populated)
def test_merge_left_empty_right_empty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],
index=pd.Index([], dtype=object),
dtype=object)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': np.array([np.nan] * 3, dtype=object),
'b': np.array([np.nan] * 3, dtype=object),
'c': np.array([np.nan] * 3, dtype=object),
'x': [1, 4, 7],
'y': [2, 5, 8],
'z': [3, 6, 9]},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x')]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
kwarg = dict(left_on='a', right_index=True)
check1(exp_in, kwarg)
exp_out['a'] = [0, 1, 2]
check2(exp_out, kwarg)
kwarg = dict(left_on='a', right_on='x')
check1(exp_in, kwarg)
exp_out['a'] = np.array([np.nan] * 3, dtype=object)
check2(exp_out, kwarg)
def test_merge_left_notempty_right_empty(self):
# GH 10824
left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': [1, 4, 7],
'b': [2, 5, 8],
'c': [3, 6, 9],
'x': np.array([np.nan] * 3, dtype=object),
'y': np.array([np.nan] * 3, dtype=object),
'z': np.array([np.nan] * 3, dtype=object)},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
def test_merge_nosort(self):
    """sort=False must preserve the left frame's key order (GH #2098).

    Fixes vs. original: drops the redundant function-local
    ``from datetime import datetime`` (already imported at module level),
    and sizes ``var8`` from ``len(var3)`` instead of hard-coding 7, which
    silently depended on the number of unique var3 values.
    """
    d = {"var1": np.random.randint(0, 10, size=10),
         "var2": np.random.randint(0, 10, size=10),
         "var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
                  datetime(2010, 2, 3), datetime(2012, 1, 12),
                  datetime(2011, 2, 4), datetime(2012, 4, 3),
                  datetime(2012, 3, 4), datetime(2008, 5, 1),
                  datetime(2010, 2, 3), datetime(2012, 2, 3)]}
    df = DataFrame.from_dict(d)
    var3 = df.var3.unique()
    var3.sort()
    # one random value per unique var3 key
    new = DataFrame.from_dict({"var3": var3,
                               "var8": np.random.random(len(var3))})

    result = df.merge(new, on="var3", sort=False)
    exp = merge(df, new, on='var3', sort=False)
    assert_frame_equal(result, exp)

    # left key order must survive an unsorted merge
    self.assertTrue((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
df2 = DataFrame({"i1": [0], "i3": [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = (DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
'i1_': {0: 0, 1: np.nan},
'i3': {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0}})
.set_index(None)
.reset_index()[['i1', 'i2', 'i1_', 'i3']])
assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
df2 = DataFrame({"i1": [0], "i3": [0.7]})
result = df1.join(df2, rsuffix="_", on='i1')
expected = (DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
'i2': {0: 0.5, 1: 1.5},
'i3': {0: 0.69999999999999996,
1: nan}})
[['i1', 'i2', 'i1_', 'i3']])
assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on='key1')
tm.assertIsInstance(result, NotADataFrame)
def test_join_append_timedeltas(self):
import datetime as dt
from pandas import NaT
# timedelta64 issues with join/merge
# GH 5695
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
dt.datetime(2013, 11, 5, 5, 56)],
't': [dt.timedelta(0, 22500),
dt.timedelta(0, 22500)]})
assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td, td], index=["A", "B"]))
rhs = DataFrame(Series([td], index=["A"]))
result = lhs.join(rhs, rsuffix='r', how="left")
expected = DataFrame({'0': Series([td, td], index=list('AB')),
'0r': Series([td, NaT], index=list('AB'))})
assert_frame_equal(result, expected)
def test_other_datetime_unit(self):
# GH 13389
df1 = pd.DataFrame({'entity_id': [101, 102]})
s = pd.Series([None, None], index=[101, 102], name='days')
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
df2 = s.astype(dtype).to_frame('days')
# coerces to datetime64[ns], thus sholuld not be affected
self.assertEqual(df2['days'].dtype, 'datetime64[ns]')
result = df1.merge(df2, left_on='entity_id', right_index=True)
exp = pd.DataFrame({'entity_id': [101, 102],
'days': np.array(['nat', 'nat'],
dtype='datetime64[ns]')},
columns=['entity_id', 'days'])
tm.assert_frame_equal(result, exp)
def test_other_timedelta_unit(self):
# GH 13389
df1 = pd.DataFrame({'entity_id': [101, 102]})
s = pd.Series([None, None], index=[101, 102], name='days')
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
df2 = s.astype(dtype).to_frame('days')
self.assertEqual(df2['days'].dtype, dtype)
result = df1.merge(df2, left_on='entity_id', right_index=True)
exp = pd.DataFrame({'entity_id': [101, 102],
'days': np.array(['nat', 'nat'],
dtype=dtype)},
columns=['entity_id', 'days'])
tm.assert_frame_equal(result, exp)
def test_overlapping_columns_error_message(self):
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
expected = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9],
'v3': [4, 5, 6],
'v4': [7, 8, 9]})
expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']
assert_frame_equal(merge(df, df2), expected)
# #2649, #10639
df2.columns = ['key1', 'foo', 'foo']
self.assertRaises(ValueError, merge, df, df2)
def test_merge_on_datetime64tz(self):
# GH11405
left = pd.DataFrame({'key': pd.date_range('20151010', periods=2,
tz='US/Eastern'),
'value': [1, 2]})
right = pd.DataFrame({'key': pd.date_range('20151011', periods=3,
tz='US/Eastern'),
'value': [1, 2, 3]})
expected = DataFrame({'key': pd.date_range('20151010', periods=4,
tz='US/Eastern'),
'value_x': [1, 2, np.nan, np.nan],
'value_y': [np.nan, 1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'value': pd.date_range('20151010', periods=2,
tz='US/Eastern'),
'key': [1, 2]})
right = pd.DataFrame({'value': pd.date_range('20151011', periods=2,
tz='US/Eastern'),
'key': [2, 3]})
expected = DataFrame({
'value_x': list(pd.date_range('20151010', periods=2,
tz='US/Eastern')) + [pd.NaT],
'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2,
tz='US/Eastern')),
'key': [1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
self.assertEqual(result['value_x'].dtype, 'datetime64[ns, US/Eastern]')
self.assertEqual(result['value_y'].dtype, 'datetime64[ns, US/Eastern]')
def test_merge_on_periods(self):
left = pd.DataFrame({'key': pd.period_range('20151010', periods=2,
freq='D'),
'value': [1, 2]})
right = pd.DataFrame({'key': pd.period_range('20151011', periods=3,
freq='D'),
'value': [1, 2, 3]})
expected = DataFrame({'key': pd.period_range('20151010', periods=4,
freq='D'),
'value_x': [1, 2, np.nan, np.nan],
'value_y': [np.nan, 1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'value': pd.period_range('20151010', periods=2,
freq='D'),
'key': [1, 2]})
right = pd.DataFrame({'value': pd.period_range('20151011', periods=2,
freq='D'),
'key': [2, 3]})
exp_x = pd.period_range('20151010', periods=2, freq='D')
exp_y = pd.period_range('20151011', periods=2, freq='D')
expected = DataFrame({'value_x': list(exp_x) + [pd.NaT],
'value_y': [pd.NaT] + list(exp_y),
'key': [1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
self.assertEqual(result['value_x'].dtype, 'object')
self.assertEqual(result['value_y'].dtype, 'object')
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
df1 = DataFrame({'col1': [0, 1], 'col_left': [
'a', 'b'], 'col_conflict': [1, 2]})
df1_copy = df1.copy()
df2 = DataFrame({'col1': [1, 2, 3, 4, 5], 'col_right': [2, 2, 2, 2, 2],
'col_conflict': [1, 2, 3, 4, 5]})
df2_copy = df2.copy()
df_result = DataFrame({
'col1': [0, 1, 2, 3, 4, 5],
'col_conflict_x': [1, 2, np.nan, np.nan, np.nan, np.nan],
'col_left': ['a', 'b', np.nan, np.nan, np.nan, np.nan],
'col_conflict_y': [np.nan, 1, 2, 3, 4, 5],
'col_right': [np.nan, 2, 2, 2, 2, 2]})
df_result['_merge'] = Categorical(
['left_only', 'both', 'right_only',
'right_only', 'right_only', 'right_only'],
categories=['left_only', 'right_only', 'both'])
df_result = df_result[['col1', 'col_conflict_x', 'col_left',
'col_conflict_y', 'col_right', '_merge']]
test = merge(df1, df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
test = df1.merge(df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
# No side effects
assert_frame_equal(df1, df1_copy)
assert_frame_equal(df2, df2_copy)
# Check with custom name
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(
columns={'_merge': 'custom_name'})
test_custom_name = merge(
df1, df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(
df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
# Check only accepts strings and booleans
with tm.assertRaises(ValueError):
merge(df1, df2, on='col1', how='outer', indicator=5)
with tm.assertRaises(ValueError):
df1.merge(df2, on='col1', how='outer', indicator=5)
# Check result integrity
test2 = merge(df1, df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test2 = df1.merge(df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test3 = merge(df1, df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test3 = df1.merge(df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
# Check if working name in df
for i in ['_right_indicator', '_left_indicator', '_merge']:
df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]})
with tm.assertRaises(ValueError):
merge(df1, df_badcolumn, on='col1',
how='outer', indicator=True)
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)
# Check for name conflict with custom name
df_badcolumn = DataFrame(
{'col1': [1, 2], 'custom_column_name': [2, 2]})
with tm.assertRaises(ValueError):
merge(df1, df_badcolumn, on='col1', how='outer',
indicator='custom_column_name')
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer',
indicator='custom_column_name')
# Merge on multiple columns
df3 = DataFrame({'col1': [0, 1], 'col2': ['a', 'b']})
df4 = DataFrame({'col1': [1, 1, 3], 'col2': ['b', 'x', 'y']})
hand_coded_result = DataFrame({'col1': [0, 1, 1, 3],
'col2': ['a', 'b', 'x', 'y']})
hand_coded_result['_merge'] = Categorical(
['left_only', 'both', 'right_only', 'right_only'],
categories=['left_only', 'right_only', 'both'])
test5 = merge(df3, df4, on=['col1', 'col2'],
how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=['col1', 'col2'],
how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
# TODO check_names on merge?
assert_frame_equal(result, expected, check_names=False)
class TestMergeMulti(tm.TestCase):
def setUp(self):
    """Create a MultiIndexed frame plus flat key columns (with one miss)."""
    self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                    ['one', 'two', 'three']],
                            labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                    [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                            names=['first', 'second'])
    self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
                             columns=['j_one', 'j_two', 'j_three'])

    # flat key columns with NA-producing misses ('snap' has no
    # counterpart in the MultiIndex above)
    key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
            'qux', 'snap']
    key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
            'three', 'one']
    self.data = DataFrame({'key1': key1, 'key2': key2,
                           'data': np.random.randn(len(key1))})
def test_merge_on_multikey(self):
joined = self.data.join(self.to_join, on=['key1', 'key2'])
join_key = Index(lzip(self.data['key1'], self.data['key2']))
indexer = self.to_join.index.get_indexer(join_key)
ex_values = self.to_join.values.take(indexer, axis=0)
ex_values[indexer == -1] = np.nan
expected = self.data.join(DataFrame(ex_values,
columns=self.to_join.columns))
# TODO: columns aren't in the same order yet
assert_frame_equal(joined, expected.ix[:, joined.columns])
left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)
right = expected.ix[:, joined.columns].sort_values(['key1', 'key2'],
kind='mergesort')
assert_frame_equal(left, right)
def test_left_join_multi_index(self):
icols = ['1st', '2nd', '3rd']
def bind_cols(df):
iord = lambda a: 0 if a != a else ord(a)
f = lambda ts: ts.map(iord) - ord('a')
return (f(df['1st']) + f(df['3rd']) * 1e2 +
df['2nd'].fillna(0) * 1e4)
def run_asserts(left, right):
for sort in [False, True]:
res = left.join(right, on=icols, how='left', sort=sort)
self.assertTrue(len(left) < len(res) + 1)
self.assertFalse(res['4th'].isnull().any())
self.assertFalse(res['5th'].isnull().any())
tm.assert_series_equal(
res['4th'], - res['5th'], check_names=False)
result = bind_cols(res.iloc[:, :-2])
tm.assert_series_equal(res['4th'], result, check_names=False)
self.assertTrue(result.name is None)
if sort:
tm.assert_frame_equal(
res, res.sort_values(icols, kind='mergesort'))
out = merge(left, right.reset_index(), on=icols,
sort=sort, how='left')
res.index = np.arange(len(res))
tm.assert_frame_equal(out, res)
lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))
left = DataFrame(np.random.choice(lc, (5000, 2)),
columns=['1st', '3rd'])
left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
left['4th'] = bind_cols(left)
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
# inject some nulls
left.loc[1::23, '1st'] = np.nan
left.loc[2::37, '2nd'] = np.nan
left.loc[3::43, '3rd'] = np.nan
left['4th'] = bind_cols(left)
i = np.random.permutation(len(left))
right = left.iloc[i, :-1]
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
def test_merge_right_vs_left(self):
    """A multikey left merge equals the mirrored right merge, sorted or not."""
    for sort in (False, True):
        as_left = self.data.merge(self.to_join,
                                  left_on=['key1', 'key2'],
                                  right_index=True, how='left',
                                  sort=sort)
        as_right = self.to_join.merge(self.data,
                                      right_on=['key1', 'key2'],
                                      left_index=True, how='right',
                                      sort=sort)
        # align column order before comparing
        as_right = as_right.ix[:, as_left.columns]
        assert_frame_equal(as_left, as_right)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({'key1': key1, 'key2': key2,
'value1': np.random.randn(20000)})
df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
'value2': np.random.randn(10000)})
# just to hit the label compression code path
merge(df, df2, how='outer')
def test_left_join_index_preserve_order(self):
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24), dtype=np.int64)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(
result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# test join with multi dtypes blocks
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'k3': np.array([0, 1, 2] * 8, dtype=np.float32),
'v': np.array(np.arange(24), dtype=np.int32)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(
result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# do a right join for an extra test
joined = merge(right, left, left_index=True,
right_on=['k1', 'k2'], how='right')
tm.assert_frame_equal(joined.ix[:, expected.columns], expected)
def test_left_join_index_multi_match_multiindex(self):
left = DataFrame([
['X', 'Y', 'C', 'a'],
['W', 'Y', 'C', 'e'],
['V', 'Q', 'A', 'h'],
['V', 'R', 'D', 'i'],
['X', 'Y', 'D', 'b'],
['X', 'Y', 'A', 'c'],
['W', 'Q', 'B', 'f'],
['W', 'R', 'C', 'g'],
['V', 'Y', 'C', 'j'],
['X', 'Y', 'B', 'd']],
columns=['cola', 'colb', 'colc', 'tag'],
index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])
right = DataFrame([
['W', 'R', 'C', 0],
['W', 'Q', 'B', 3],
['W', 'Q', 'B', 8],
['X', 'Y', 'A', 1],
['X', 'Y', 'A', 4],
['X', 'Y', 'B', 5],
['X', 'Y', 'C', 6],
['X', 'Y', 'C', 9],
['X', 'Q', 'C', -6],
['X', 'R', 'C', -9],
['V', 'Y', 'C', 7],
['V', 'R', 'D', 2],
['V', 'R', 'D', -1],
['V', 'Q', 'A', -3]],
columns=['col1', 'col2', 'col3', 'val'])
right.set_index(['col1', 'col2', 'col3'], inplace=True)
result = left.join(right, on=['cola', 'colb', 'colc'], how='left')
expected = DataFrame([
['X', 'Y', 'C', 'a', 6],
['X', 'Y', 'C', 'a', 9],
['W', 'Y', 'C', 'e', nan],
['V', 'Q', 'A', 'h', -3],
['V', 'R', 'D', 'i', 2],
['V', 'R', 'D', 'i', -1],
['X', 'Y', 'D', 'b', nan],
['X', 'Y', 'A', 'c', 1],
['X', 'Y', 'A', 'c', 4],
['W', 'Q', 'B', 'f', 3],
['W', 'Q', 'B', 'f', 8],
['W', 'R', 'C', 'g', 0],
['V', 'Y', 'C', 'j', 7],
['X', 'Y', 'B', 'd', 5]],
columns=['cola', 'colb', 'colc', 'tag', 'val'],
index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['cola', 'colb', 'colc'],
how='left', sort=True)
tm.assert_frame_equal(
result,
expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))
# GH7331 - maintain left frame order in left merge
right.reset_index(inplace=True)
right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()
result = merge(left, right, how='left', on=left.columns[:-1].tolist())
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
left = DataFrame([
['c', 0],
['b', 1],
['a', 2],
['b', 3]],
columns=['tag', 'val'],
index=[2, 0, 1, 3])
right = DataFrame([
['a', 'v'],
['c', 'w'],
['c', 'x'],
['d', 'y'],
['a', 'z'],
['c', 'r'],
['e', 'q'],
['c', 's']],
columns=['tag', 'char'])
right.set_index('tag', inplace=True)
result = left.join(right, on='tag', how='left')
expected = DataFrame([
['c', 0, 'w'],
['c', 0, 'x'],
['c', 0, 'r'],
['c', 0, 's'],
['b', 1, nan],
['a', 2, 'v'],
['a', 2, 'z'],
['b', 3, nan]],
columns=['tag', 'val', 'char'],
index=[2, 2, 2, 2, 0, 1, 1, 3])
tm.assert_frame_equal(result, expected)
result = left.join(right, on='tag', how='left', sort=True)
tm.assert_frame_equal(
result, expected.sort_values('tag', kind='mergesort'))
# GH7331 - maintain left frame order in left merge
result = merge(left, right.reset_index(), how='left', on='tag')
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_join_multi_dtypes(self):
# test with multi dtypes in the join index
def _test(dtype1, dtype2):
left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24), dtype=np.int64)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame(
{'v2': np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan, dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['k1', 'k2'], sort=True)
expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)
tm.assert_frame_equal(result, expected)
for d1 in [np.int64, np.int32, np.int16, np.int8, np.uint8]:
for d2 in [np.int64, np.float64, np.float32, np.float16]:
_test(np.dtype(d1), np.dtype(d2))
def test_left_merge_na_buglet(self):
left = DataFrame({'id': list('abcde'), 'v1': randn(5),
'v2': randn(5), 'dummy': list('abcde'),
'v3': randn(5)},
columns=['id', 'v1', 'v2', 'dummy', 'v3'])
right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
merged = merge(left, right, on='id', how='left')
rdf = right.drop(['id'], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(merged, expected)
def test_merge_na_keys(self):
    """Outer merge when the shared 'data' column contains NaNs."""
    data = [[1950, "A", 1.5],
            [1950, "B", 1.5],
            [1955, "B", 1.5],
            [1960, "B", np.nan],
            [1970, "B", 4.],
            [1950, "C", 4.],
            [1960, "C", np.nan],
            [1965, "C", 3.],
            [1970, "C", 4.]]

    frame = DataFrame(data, columns=["year", "panel", "data"])

    other_data = [[1960, 'A', np.nan],
                  [1970, 'A', np.nan],
                  [1955, 'A', np.nan],
                  [1965, 'A', np.nan],
                  [1965, 'B', np.nan],
                  [1955, 'C', np.nan]]
    other = DataFrame(other_data, columns=['year', 'panel', 'data'])

    result = frame.merge(other, how='outer')

    # NaN != NaN, so the reference result is computed with a -999 sentinel
    # in place of NaN, then the sentinel is swapped back afterwards
    expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
    expected = expected.replace(-999, np.nan)

    tm.assert_frame_equal(result, expected)
@slow
def test_int64_overflow_issues(self):
    """Stress merges whose combined key cardinality can overflow the int64
    factorization used internally, comparing against a manually computed
    outer merge (see GH #2690 / GH #9092)."""
    from itertools import product
    from collections import defaultdict
    from pandas.core.groupby import _int64_overflow_possible

    # #2690, combinatorial explosion
    df1 = DataFrame(np.random.randn(1000, 7),
                    columns=list('ABCDEF') + ['G1'])
    df2 = DataFrame(np.random.randn(1000, 7),
                    columns=list('ABCDEF') + ['G2'])

    # it works!
    result = merge(df1, df2, how='outer')
    self.assertTrue(len(result) == 2000)

    low, high, n = -1 << 10, 1 << 10, 1 << 20
    left = DataFrame(np.random.randint(low, high, (n, 7)),
                     columns=list('ABCDEFG'))
    left['left'] = left.sum(axis=1)

    # one-2-one match: right frame is a shuffled copy of left with the
    # summary column negated
    i = np.random.permutation(len(left))
    right = left.iloc[i].copy()
    right.columns = right.columns[:-1].tolist() + ['right']
    right.index = np.arange(len(right))
    right['right'] *= -1

    out = merge(left, right, how='outer')
    self.assertEqual(len(out), len(left))
    assert_series_equal(out['left'], - out['right'], check_names=False)
    result = out.iloc[:, :-2].sum(axis=1)
    assert_series_equal(out['left'], result, check_names=False)
    self.assertTrue(result.name is None)

    out.sort_values(out.columns.tolist(), inplace=True)
    out.index = np.arange(len(out))
    # with a one-to-one match every join flavor must give the same rows
    for how in ['left', 'right', 'outer', 'inner']:
        assert_frame_equal(out, merge(left, right, how=how, sort=True))

    # check that left merge w/ sort=False maintains left frame order
    out = merge(left, right, how='left', sort=False)
    assert_frame_equal(left, out[left.columns.tolist()])
    out = merge(right, left, how='left', sort=False)
    assert_frame_equal(right, out[right.columns.tolist()])

    # one-2-many/none match
    n = 1 << 11
    left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
                     columns=list('ABCDEFG'))

    # confirm that this is checking what it is supposed to check
    shape = left.apply(Series.nunique).values
    self.assertTrue(_int64_overflow_possible(shape))

    # add duplicates to left frame
    left = concat([left, left], ignore_index=True)

    right = DataFrame(np.random.randint(low, high, (n // 2, 7))
                      .astype('int64'),
                      columns=list('ABCDEFG'))

    # add duplicates & overlap with left to the right frame
    i = np.random.choice(len(left), n)
    right = concat([right, right, left.iloc[i]], ignore_index=True)

    left['left'] = np.random.randn(len(left))
    right['right'] = np.random.randn(len(right))

    # shuffle left & right frames
    i = np.random.permutation(len(left))
    left = left.iloc[i].copy()
    left.index = np.arange(len(left))

    i = np.random.permutation(len(right))
    right = right.iloc[i].copy()
    right.index = np.arange(len(right))

    # manually compute outer merge
    ldict, rdict = defaultdict(list), defaultdict(list)

    for idx, row in left.set_index(list('ABCDEFG')).iterrows():
        ldict[idx].append(row['left'])

    for idx, row in right.set_index(list('ABCDEFG')).iterrows():
        rdict[idx].append(row['right'])

    vals = []
    for k, lval in ldict.items():
        rval = rdict.get(k, [np.nan])
        for lv, rv in product(lval, rval):
            vals.append(k + tuple([lv, rv]))

    for k, rval in rdict.items():
        if k not in ldict:
            for rv in rval:
                vals.append(k + tuple([np.nan, rv]))

    def align(df):
        # canonicalize row order/index so frames can be compared
        df = df.sort_values(df.columns.tolist())
        df.index = np.arange(len(df))
        return df

    def verify_order(df):
        # key columns must come out in stable sorted order when sort=True
        kcols = list('ABCDEFG')
        assert_frame_equal(df[kcols].copy(),
                           df[kcols].sort_values(kcols, kind='mergesort'))

    out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
    out = align(out)

    # row masks selecting, from the manual outer merge, the subset each
    # join flavor should produce
    jmask = {'left': out['left'].notnull(),
             'right': out['right'].notnull(),
             'inner': out['left'].notnull() & out['right'].notnull(),
             'outer': np.ones(len(out), dtype='bool')}

    for how in 'left', 'right', 'outer', 'inner':
        mask = jmask[how]
        frame = align(out[mask].copy())
        self.assertTrue(mask.all() ^ mask.any() or how == 'outer')

        for sort in [False, True]:
            res = merge(left, right, how=how, sort=sort)
            if sort:
                verify_order(res)

            # as in GH9092 dtypes break with outer/right join
            assert_frame_equal(frame, align(res),
                               check_dtype=how not in ('right', 'outer'))
def test_join_multi_levels(self):
    """Join a single-indexed frame against a MultiIndexed frame (GH 3662).

    Fixes over the previous version:
    - the inner-join result was asserted twice back to back; one assert kept.
    - ``portfolio2.index.set_names([...])`` discarded its return value:
      ``Index.set_names`` returns a *new* Index (unless ``inplace=True``),
      so the intended rename never happened; the result is now assigned
      back so the invalid-overlap case really exercises renamed levels.
    """
    # GH 3662
    # merge multi-levels
    household = (
        DataFrame(
            dict(household_id=[1, 2, 3],
                 male=[0, 1, 0],
                 wealth=[196087.3, 316478.7, 294750]),
            columns=['household_id', 'male', 'wealth'])
        .set_index('household_id'))
    portfolio = (
        DataFrame(
            dict(household_id=[1, 2, 2, 3, 3, 3, 4],
                 asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
                           "gb00b03mlx29", "lu0197800237", "nl0000289965",
                           np.nan],
                 name=["ABN Amro", "Robeco", "Royal Dutch Shell",
                       "Royal Dutch Shell",
                       "AAB Eastern Europe Equity Fund",
                       "Postbank BioTech Fonds", np.nan],
                 share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
            columns=['household_id', 'asset_id', 'name', 'share'])
        .set_index(['household_id', 'asset_id']))

    result = household.join(portfolio, how='inner')
    expected = (
        DataFrame(
            dict(male=[0, 1, 1, 0, 0, 0],
                 wealth=[196087.3, 316478.7, 316478.7,
                         294750.0, 294750.0, 294750.0],
                 name=['ABN Amro', 'Robeco', 'Royal Dutch Shell',
                       'Royal Dutch Shell',
                       'AAB Eastern Europe Equity Fund',
                       'Postbank BioTech Fonds'],
                 share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],
                 household_id=[1, 2, 2, 3, 3, 3],
                 asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29',
                           'gb00b03mlx29', 'lu0197800237',
                           'nl0000289965']))
        .set_index(['household_id', 'asset_id'])
        .reindex(columns=['male', 'wealth', 'name', 'share']))
    assert_frame_equal(result, expected)

    # equivalency: joining on the shared index level equals merging the
    # reset frames on 'household_id' and re-indexing
    result2 = (merge(household.reset_index(), portfolio.reset_index(),
                     on=['household_id'], how='inner')
               .set_index(['household_id', 'asset_id']))
    assert_frame_equal(result2, expected)

    # outer join additionally keeps household 4, which has no asset
    result = household.join(portfolio, how='outer')
    expected = (concat([
        expected,
        (DataFrame(
            dict(share=[1.00]),
            index=MultiIndex.from_tuples(
                [(4, np.nan)],
                names=['household_id', 'asset_id'])))
    ], axis=0).reindex(columns=expected.columns))
    assert_frame_equal(result, expected)

    # invalid cases: index names that do not overlap must raise
    household.index.name = 'foo'

    def f():
        household.join(portfolio, how='inner')
    self.assertRaises(ValueError, f)

    portfolio2 = portfolio.copy()
    # set_names returns a new Index; assign it back (previously a no-op)
    portfolio2.index = portfolio2.index.set_names(['household_id', 'foo'])

    def f():
        portfolio2.join(portfolio, how='inner')
    self.assertRaises(ValueError, f)
def test_join_multi_levels2(self):
    """More advanced multi-level merges (GH 6360): joining on a *partial*
    overlap of MultiIndex levels is expected to raise NotImplementedError,
    while the equivalent reset_index/merge/set_index round trip works."""
    # some more advanced merges
    # GH6360
    household = (
        DataFrame(
            dict(household_id=[1, 2, 2, 3, 3, 3, 4],
                 asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29",
                           "gb00b03mlx29", "lu0197800237", "nl0000289965",
                           np.nan],
                 share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
            columns=['household_id', 'asset_id', 'share'])
        .set_index(['household_id', 'asset_id']))

    log_return = DataFrame(dict(
        asset_id=["gb00b03mlx29", "gb00b03mlx29",
                  "gb00b03mlx29", "lu0197800237", "lu0197800237"],
        t=[233, 234, 235, 180, 181],
        log_return=[.09604978, -.06524096, .03532373, .03025441, .036997]
    )).set_index(["asset_id", "t"])

    expected = (
        DataFrame(dict(
            household_id=[2, 2, 2, 3, 3, 3, 3, 3],
            asset_id=["gb00b03mlx29", "gb00b03mlx29",
                      "gb00b03mlx29", "gb00b03mlx29",
                      "gb00b03mlx29", "gb00b03mlx29",
                      "lu0197800237", "lu0197800237"],
            t=[233, 234, 235, 233, 234, 235, 180, 181],
            share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
            log_return=[.09604978, -.06524096, .03532373,
                        .09604978, -.06524096, .03532373,
                        .03025441, .036997]
        ))
        .set_index(["household_id", "asset_id", "t"])
        .reindex(columns=['share', 'log_return']))

    # direct join on partially overlapping levels is not implemented
    def f():
        household.join(log_return, how='inner')
    self.assertRaises(NotImplementedError, f)

    # this is the equivalency
    result = (merge(household.reset_index(), log_return.reset_index(),
                    on=['asset_id'], how='inner')
              .set_index(['household_id', 'asset_id', 't']))
    assert_frame_equal(result, expected)

    # expected outer result (kept for reference; the outer join below is
    # also not implemented and must raise)
    expected = (
        DataFrame(dict(
            household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
            asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
                      "gb00b03mlx29", "gb00b03mlx29",
                      "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29",
                      "lu0197800237", "lu0197800237",
                      "nl0000289965", None],
            t=[None, None, 233, 234, 235, 233, 234,
               235, 180, 181, None, None],
            share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15,
                   0.15, 0.15, 0.6, 0.6, 0.25, 1.0],
            log_return=[None, None, .09604978, -.06524096, .03532373,
                        .09604978, -.06524096, .03532373,
                        .03025441, .036997, None, None]
        ))
        .set_index(["household_id", "asset_id", "t"]))

    def f():
        household.join(log_return, how='outer')
    self.assertRaises(NotImplementedError, f)
if __name__ == '__main__':
    # nose test-runner entry point: verbose, stop on first failure, drop
    # into pdb on errors and failures
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
atty303/pyfilesystem | fs/tests/test_ftpfs.py | 2 | 2085 | #!/usr/bin/env python
from fs.tests import FSTestCases, ThreadingTestCases
import unittest
import os
import sys
import shutil
import tempfile
import subprocess
import time
from os.path import abspath
try:
from pyftpdlib import ftpserver
except ImportError:
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
from fs.path import *
from fs import ftpfs
ftp_port = 30000
class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
    """Run the generic filesystem test suites against an FTPFS that talks to
    a local pyftpdlib server spawned as a subprocess (see __main__ below)."""

    def setUp(self):
        # bump the module-level port for every test so a lingering server
        # from a previous test cannot collide with the new one
        global ftp_port
        ftp_port += 1
        use_port = str(ftp_port)
        #ftp_port = 10000
        self.temp_dir = tempfile.mkdtemp(u"ftpfstests")

        file_path = __file__
        if ':' not in file_path:
            file_path = abspath(file_path)
        # re-run this module as a script to serve temp_dir over FTP
        self.ftp_server = subprocess.Popen([sys.executable, file_path, self.temp_dir, str(use_port)])
        # Need to sleep to allow ftp server to start
        time.sleep(.2)
        self.fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', dircache=True, port=use_port, timeout=5.0)
        self.fs.cache_hint(True)

    def tearDown(self):
        # kill the server subprocess (platform specific), then clean up the
        # served directory and close the client filesystem
        if sys.platform == 'win32':
            import win32api
            os.popen('TASKKILL /PID '+str(self.ftp_server.pid)+' /F')
        else:
            os.system('kill '+str(self.ftp_server.pid))
        shutil.rmtree(self.temp_dir)
        self.fs.close()

    def check(self, p):
        # verify that path p exists inside the served directory on local disk
        check_path = self.temp_dir.rstrip(os.sep) + os.sep + p
        return os.path.exists(check_path.encode('utf-8'))
if __name__ == "__main__":
    # Run an ftp server that exposes a given directory
    # Usage: python test_ftpfs.py <served_dir> <port> (spawned by TestFTPFS.setUp)
    import sys

    authorizer = ftpserver.DummyAuthorizer()
    authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
    authorizer.add_anonymous(sys.argv[1])

    def nolog(*args):
        # silence pyftpdlib's console logging
        pass
    ftpserver.log = nolog
    ftpserver.logline = nolog

    handler = ftpserver.FTPHandler
    handler.authorizer = authorizer
    address = ("127.0.0.1", int(sys.argv[2]))
    #print address
    ftpd = ftpserver.FTPServer(address, handler)
    ftpd.serve_forever()
| bsd-3-clause |
MakeHer/edx-platform | lms/djangoapps/course_blocks/transformers/tests/test_library_content.py | 32 | 6813 | """
Tests for ContentLibraryTransformer.
"""
import mock
from student.tests.factories import CourseEnrollmentFactory
from course_blocks.transformers.library_content import ContentLibraryTransformer
from course_blocks.api import get_course_blocks, clear_course_from_cache
from lms.djangoapps.course_blocks.transformers.tests.test_helpers import CourseStructureTestCase
class MockedModule(object):
    """Minimal stand-in for a student-module record.

    It only carries the serialized ``state`` payload that the code under
    test reads back.
    """

    def __init__(self, state):
        """Remember the raw state string for later inspection."""
        self.state = state
class ContentLibraryTransformerTestCase(CourseStructureTestCase):
    """
    ContentLibraryTransformer Test
    """

    def setUp(self):
        """
        Setup course structure and create user for content library transformer test.
        """
        super(ContentLibraryTransformerTestCase, self).setUp()

        # Build course.
        self.course_hierarchy = self.get_course_hierarchy()
        self.blocks = self.build_course(self.course_hierarchy)
        self.course = self.blocks['course']
        clear_course_from_cache(self.course.id)

        # Enroll user in course.
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, is_active=True)

        # pre-serialized state pinning 'vertical2' as the library selection
        # for the mocked-selection scenario in test_content_library
        self.selected_module = MockedModule('{"selected": [["vertical", "vertical_vertical2"]]}')
        self.transformer = ContentLibraryTransformer()

    def get_course_hierarchy(self):
        """
        Get a course hierarchy to test with.
        """
        # course > chapter1 > lesson1 > vertical1 > library_content1,
        # which offers two candidate verticals (vertical2/html1, vertical3/html2)
        return [{
            'org': 'ContentLibraryTransformer',
            'course': 'CL101F',
            'run': 'test_run',
            '#type': 'course',
            '#ref': 'course',
            '#children': [
                {
                    '#type': 'chapter',
                    '#ref': 'chapter1',
                    '#children': [
                        {
                            '#type': 'sequential',
                            '#ref': 'lesson1',
                            '#children': [
                                {
                                    '#type': 'vertical',
                                    '#ref': 'vertical1',
                                    '#children': [
                                        {
                                            'metadata': {'category': 'library_content'},
                                            '#type': 'library_content',
                                            '#ref': 'library_content1',
                                            '#children': [
                                                {
                                                    'metadata': {'display_name': "CL Vertical 2"},
                                                    '#type': 'vertical',
                                                    '#ref': 'vertical2',
                                                    '#children': [
                                                        {
                                                            'metadata': {'display_name': "HTML1"},
                                                            '#type': 'html',
                                                            '#ref': 'html1',
                                                        }
                                                    ]
                                                },
                                                {
                                                    'metadata': {'display_name': "CL Vertical 3"},
                                                    '#type': 'vertical',
                                                    '#ref': 'vertical3',
                                                    '#children': [
                                                        {
                                                            'metadata': {'display_name': "HTML2"},
                                                            '#type': 'html',
                                                            '#ref': 'html2',
                                                        }
                                                    ]
                                                }
                                            ]
                                        }
                                    ],
                                }
                            ],
                        }
                    ],
                }
            ]
        }]

    def test_content_library(self):
        """
        Test when course has content library section.
        First test user can't see any content library section,
        and after that mock response from MySQL db.
        Check user can see mocked sections in content library.
        """
        # without transformers, every authored block is visible
        raw_block_structure = get_course_blocks(
            self.user,
            self.course.location,
            transformers={}
        )
        self.assertEqual(len(list(raw_block_structure.get_block_keys())), len(self.blocks))

        clear_course_from_cache(self.course.id)
        trans_block_structure = get_course_blocks(
            self.user,
            self.course.location,
            transformers={self.transformer}
        )

        # Should dynamically assign a block to student
        trans_keys = set(trans_block_structure.get_block_keys())
        block_key_set = self.get_block_key_set(
            self.blocks, 'course', 'chapter1', 'lesson1', 'vertical1', 'library_content1'
        )
        for key in block_key_set:
            self.assertIn(key, trans_keys)

        # at least one of the two library children must have been picked
        vertical2_selected = self.get_block_key_set(self.blocks, 'vertical2').pop() in trans_keys
        vertical3_selected = self.get_block_key_set(self.blocks, 'vertical3').pop() in trans_keys
        self.assertTrue(vertical2_selected or vertical3_selected)

        # Check course structure again, with mocked selected modules for a user.
        with mock.patch(
            'course_blocks.transformers.library_content.ContentLibraryTransformer._get_student_module',
            return_value=self.selected_module
        ):
            clear_course_from_cache(self.course.id)
            trans_block_structure = get_course_blocks(
                self.user,
                self.course.location,
                transformers={self.transformer}
            )
            # with the selection pinned to vertical2, exactly that branch
            # (and its html1 child) must be visible
            self.assertEqual(
                set(trans_block_structure.get_block_keys()),
                self.get_block_key_set(
                    self.blocks,
                    'course',
                    'chapter1',
                    'lesson1',
                    'vertical1',
                    'library_content1',
                    'vertical2',
                    'html1'
                )
            )
| agpl-3.0 |
PlayUAV/MissionPlanner | Lib/site-packages/scipy/special/utils/makenpz.py | 57 | 2159 | #!/usr/bin/env python
"""
makenpz.py DIRECTORY
Build a npz containing all data files in the directory.
"""
import os
import numpy as np
from optparse import OptionParser
def main():
    """Collect every ``*.txt`` file under the directory named on the command
    line into a single compressed ``<dir>.npz`` archive.

    Each text file becomes one array; its key is the file's path relative to
    the input directory, with '/' replaced by '-' and the '.txt' suffix
    stripped.
    """
    p = OptionParser()
    options, args = p.parse_args()

    if len(args) != 1:
        p.error("no valid directory given")

    inp = args[0]
    outp = inp + ".npz"

    files = []
    for dirpath, dirnames, filenames in os.walk(inp):
        for fn in filenames:
            if fn.endswith('.txt'):
                # (relative key without extension, absolute-ish path)
                files.append(
                    (dirpath[len(inp)+1:] + '/' + fn[:-4],
                     os.path.join(dirpath, fn)))

    data = {}
    for key, fn in files:
        key = key.replace('/', '-')
        try:
            data[key] = np.loadtxt(fn)
        except ValueError:
            # Python 2 print statement -- this script predates Python 3
            print "Failed to load", fn

    savez_compress(outp, **data)
def savez_compress(file, *args, **kwds):
    """Save several arrays into one DEFLATE-compressed ``.npz`` zip file.

    Python 2-era equivalent of ``numpy.savez_compressed``: positional arrays
    are stored as ``arr_0``, ``arr_1``, ... and keyword arguments under
    their own names. Raises ValueError if a keyword collides with a
    positional slot name.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile

    if isinstance(file, basestring):  # Python 2 only (no basestring in py3)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError("Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    # NOTE(review): local name shadows the builtin ``zip``
    zip = zipfile.ZipFile(file, mode="w", compression=zipfile.ZIP_DEFLATED)

    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.iteritems():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                np.lib.format.write_array(fid, np.asanyarray(val))
                fid.close()
                fid = None
                zip.write(tmpfile, arcname=fname)
            finally:
                # close only if write_array/zip.write raised before fid=None
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)

    zip.close()
if __name__ == "__main__":
    # script entry point
    main()
| gpl-3.0 |
Tejal011089/med2-app | stock/report/batch_wise_balance_history/batch_wise_balance_history.py | 30 | 2638 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
def execute(filters=None):
    """Report entry point: return (columns, rows) for the batch-wise
    balance history, one row per (item, warehouse, batch) combination,
    each level sorted ascending."""
    filters = filters or {}
    columns = get_columns(filters)
    item_map = get_item_details(filters)
    iwb_map = get_item_warehouse_batch_map(filters)

    rows = []
    for item_code in sorted(iwb_map):
        warehouses = iwb_map[item_code]
        item = item_map[item_code]
        for warehouse in sorted(warehouses):
            batches = warehouses[warehouse]
            for batch_no in sorted(batches):
                qty = batches[batch_no]
                rows.append([
                    item_code, item["item_name"], item["description"],
                    warehouse, batch_no,
                    qty.opening_qty, qty.in_qty, qty.out_qty, qty.bal_qty,
                ])

    return columns, rows
def get_columns(filters):
    """Return the static column specification for the report.

    ``filters`` is accepted for interface symmetry with the other report
    helpers but is not used.
    """
    return [
        "Item:Link/Item:100",
        "Item Name::150",
        "Description::150",
        "Warehouse:Link/Warehouse:100",
        "Batch:Link/Batch:100",
        "Opening Qty::90",
        "In Qty::80",
        "Out Qty::80",
        "Balance Qty::90",
    ]
def get_conditions(filters):
    """Build the SQL WHERE fragment for the posting-date upper bound.

    Both 'from_date' and 'to_date' are mandatory: a user-facing error is
    raised (raise_exception=1) when either is missing. Only 'to_date' ends
    up in the returned fragment; 'from_date' is applied later when bucketing
    quantities.
    """
    conditions = ""
    if not filters.get("from_date"):
        webnotes.msgprint("Please enter From Date", raise_exception=1)

    if filters.get("to_date"):
        # NOTE(review): the value is interpolated straight into the SQL
        # string -- safe only while 'to_date' comes from the framework's
        # date filter; would be an injection risk for free-form input
        conditions += " and posting_date <= '%s'" % filters["to_date"]
    else:
        webnotes.msgprint("Please enter To Date", raise_exception=1)

    return conditions
#get all details
def get_stock_ledger_entries(filters):
    """Fetch all non-cancelled (docstatus < 2) stock ledger entries up to
    filters['to_date'], ordered by item and warehouse, as dicts."""
    conditions = get_conditions(filters)
    return webnotes.conn.sql("""select item_code, batch_no, warehouse,
        posting_date, actual_qty
        from `tabStock Ledger Entry`
        where docstatus < 2 %s order by item_code, warehouse""" %
        conditions, as_dict=1)
def get_item_warehouse_batch_map(filters):
    """Aggregate ledger entries into a nested map:
    {item_code: {warehouse: {batch_no: qty_dict}}} where qty_dict carries
    opening/in/out/balance quantities relative to the filter period."""
    sle = get_stock_ledger_entries(filters)
    iwb_map = {}
    for d in sle:
        iwb_map.setdefault(d.item_code, {}).setdefault(d.warehouse, {})\
            .setdefault(d.batch_no, webnotes._dict({
                "opening_qty": 0.0, "in_qty": 0.0, "out_qty": 0.0, "bal_qty": 0.0
            }))
        qty_dict = iwb_map[d.item_code][d.warehouse][d.batch_no]

        if d.posting_date < filters["from_date"]:
            # movement before the period only contributes to the opening balance
            qty_dict.opening_qty += flt(d.actual_qty)
        elif d.posting_date >= filters["from_date"] and d.posting_date <= filters["to_date"]:
            # in-period movement is split into receipts and issues
            if flt(d.actual_qty) > 0:
                qty_dict.in_qty += flt(d.actual_qty)
            else:
                qty_dict.out_qty += abs(flt(d.actual_qty))

        # running balance includes every fetched entry (all are <= to_date)
        qty_dict.bal_qty += flt(d.actual_qty)

    return iwb_map
def get_item_details(filters):
    """Map item name -> row dict (name, item_name, description) for all
    items. ``filters`` is accepted for interface symmetry but unused."""
    item_map = {}
    for d in webnotes.conn.sql("select name, item_name, description from tabItem", as_dict=1):
        item_map.setdefault(d.name, d)

    return item_map
def get_num_pairs(seq):
    """Return the number of index pairs (i, j) with i < j in ``seq``.

    This is C(n, 2) = n*(n-1)/2 (the sum of the arithmetic progression
    (n-1) + ... + 1) where n is the length of ``seq``.

    Fix: use integer floor division instead of ``int(n * (n-1) / 2)`` --
    true division produces a float, which loses exactness once n*(n-1)
    exceeds 2**53; ``//`` stays exact for arbitrarily large n.
    """
    n = len(seq)
    return n * (n - 1) // 2
def solution(A):
    """Count pairs inside the monotonic index sequences of A.

    NOTE(review): this is an unfinished sketch -- ``mono_sequences`` is
    never populated (the scanning step described below is not implemented),
    so the function currently always returns 0. Solutions 2 and 3 further
    down in this file implement the idea.
    """
    # scan the list to find all the monotonic index sequences
    # [[1,2,3], [3,4], [4,5,6]
    # we can use three queues inc_queue, const_queue, dec_queue
    # to keep temporary sequences while scanning
    mono_sequences = []
    # then find total num of segments probably using the func from
    # the solution 1
    total = 0
    for seq in mono_sequences:
        total += get_num_pairs(seq)
    return total
#----------------------------------------------------------2
class Chunk:
    """Accumulate integers into runs of consecutive values.

    ``add`` extends the current run (``buffer``) while values stay
    consecutive, silently ignores repeats, and starts a new run when a gap
    larger than 1 appears; completed runs are collected in ``chunks`` by
    ``flush``.
    """

    def __init__(self):
        self.chunks = []   # completed runs
        self.buffer = []   # run currently being built

    def add(self, v):
        """Feed the next value into the current run."""
        if not self.buffer:
            self.buffer.append(v)
            return
        step = v - self.buffer[-1]
        if step == 1:
            self.buffer.append(v)
        elif step > 1:
            # gap: archive the current run and start a new one with v
            self.flush()
            self.buffer.append(v)
        # step <= 0 (repeat or backwards value): ignored, as before

    def flush(self):
        """Move a snapshot of the current buffer into ``chunks``."""
        self.chunks.append(list(self.buffer))
        del self.buffer[:]

    def __str__(self):
        return "chunks=%s buffer =%s" % (self.chunks, self.buffer)
# ================
# Driver for solution 2: classify every adjacent index pair of `a` as
# increasing / constant / decreasing, collect the index runs with Chunk,
# then count the pairs inside each monotonic run.
a = [1, 2, 3, 2, 2, 2, 4, 5, 6, 7]

con = Chunk()
inc = Chunk()
dec = Chunk()

for i in range(len(a)-1):
    if a[i] < a[i+1]:
        inc.add(i)
        inc.add(i+1)
    elif a[i] > a[i+1]:
        dec.add(i)
        dec.add(i+1)
    else:
        con.add(i)
        con.add(i+1)

# push the trailing buffers into .chunks
con.flush()
dec.flush()
inc.flush()

total = 0
total += sum(get_num_pairs(c) for c in con.chunks)
total += sum(get_num_pairs(c) for c in inc.chunks)
total += sum(get_num_pairs(c) for c in dec.chunks)
print(total)
# -`---------------------------3
def indexes(a):
    """Flatten (start, end, diff) tuples into one index list.

    Appends each tuple's start and end indices in order, skipping a start
    index that duplicates the previously appended value (runs of adjacent
    pairs share their boundary index).
    """
    result = []
    for t in a:
        start, end = t[0], t[1]
        if not result or result[-1] != start:
            result.append(start)
        result.append(end)
    return result
def split(a):
    """Partition a list of indices into runs where the gap between
    neighbouring values is at most 1.

    Like the original, requires a non-empty input (raises IndexError on an
    empty list).
    """
    runs = []
    current = [a[0]]
    for value in a[1:]:
        if value - current[-1] > 1:
            # gap: close the current run and start a fresh one
            runs.append(current)
            current = [value]
        else:
            current.append(value)
    runs.append(current)
    return runs
# $=================================
# Driver for solution 3: the same counting done with list comprehensions --
# tag each adjacent pair with its difference, bucket the pairs by sign,
# flatten each bucket to an index list, split it into consecutive runs, and
# count the pairs inside every run.
a = [1, 2, 3, 2, 2, 2, 4, 5, 6, 7]

pairs = [(i, i+1, a[i+1] - a[i]) for i in range(len(a)-1)]
i_pairs = [p for p in pairs if p[2] > 0]   # increasing steps
c_pairs = [p for p in pairs if p[2] == 0]  # constant steps
d_pairs = [p for p in pairs if p[2] < 0]   # decreasing steps

i_ind = indexes(i_pairs)
c_ind = indexes(c_pairs)
d_ind = indexes(d_pairs)

i_c = split(i_ind)
c_c = split(c_ind)
d_c = split(d_ind)

total = 0
total += sum(get_num_pairs(c) for c in i_c)
total += sum(get_num_pairs(c) for c in c_c)
total += sum(get_num_pairs(c) for c in d_c)
print(total)
| mit |
nuncjo/odoo | addons/gamification/models/goal.py | 219 | 26522 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval
from openerp.tools.translate import _
import logging
import time
from datetime import date, datetime, timedelta
_logger = logging.getLogger(__name__)
class gamification_goal_definition(osv.Model):
"""Goal definition
A goal definition contains the way to evaluate an objective
Each module wanting to be able to set goals to the users needs to create
a new gamification_goal_definition
"""
_name = 'gamification.goal.definition'
_description = 'Gamification goal definition'
def _get_suffix(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for goal in self.browse(cr, uid, ids, context=context):
if goal.suffix and not goal.monetary:
res[goal.id] = goal.suffix
elif goal.monetary:
# use the current user's company currency
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if goal.suffix:
res[goal.id] = "%s %s" % (user.company_id.currency_id.symbol, goal.suffix)
else:
res[goal.id] = user.company_id.currency_id.symbol
else:
res[goal.id] = ""
return res
_columns = {
'name': fields.char('Goal Definition', required=True, translate=True),
'description': fields.text('Goal Description'),
'monetary': fields.boolean('Monetary Value', help="The target and current value are defined in the company currency."),
'suffix': fields.char('Suffix', help="The unit of the target and current values", translate=True),
'full_suffix': fields.function(_get_suffix, type="char", string="Full Suffix", help="The currency and suffix field"),
'computation_mode': fields.selection([
('manually', 'Recorded manually'),
('count', 'Automatic: number of records'),
('sum', 'Automatic: sum on a field'),
('python', 'Automatic: execute a specific Python code'),
],
string="Computation Mode",
help="Defined how will be computed the goals. The result of the operation will be stored in the field 'Current'.",
required=True),
'display_mode': fields.selection([
('progress', 'Progressive (using numerical values)'),
('boolean', 'Exclusive (done or not-done)'),
],
string="Displayed as", required=True),
'model_id': fields.many2one('ir.model',
string='Model',
help='The model object for the field to evaluate'),
'model_inherited_model_ids': fields.related('model_id', 'inherited_model_ids', type="many2many", obj="ir.model",
string="Inherited models", readonly="True"),
'field_id': fields.many2one('ir.model.fields',
string='Field to Sum',
help='The field containing the value to evaluate'),
'field_date_id': fields.many2one('ir.model.fields',
string='Date Field',
help='The date to use for the time period evaluated'),
'domain': fields.char("Filter Domain",
help="Domain for filtering records. General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.",
required=True),
'batch_mode': fields.boolean('Batch Mode',
help="Evaluate the expression in batch instead of once for each user"),
'batch_distinctive_field': fields.many2one('ir.model.fields',
string="Distinctive field for batch user",
help="In batch mode, this indicates which field distinct one user form the other, e.g. user_id, partner_id..."),
'batch_user_expression': fields.char("Evaluted expression for batch mode",
help="The value to compare with the distinctive field. The expression can contain reference to 'user' which is a browse record of the current user, e.g. user.id, user.partner_id.id..."),
'compute_code': fields.text('Python Code',
help="Python code to be executed for each user. 'result' should contains the new current value. Evaluated user can be access through object.user_id."),
'condition': fields.selection([
('higher', 'The higher the better'),
('lower', 'The lower the better')
],
string='Goal Performance',
help='A goal is considered as completed when the current value is compared to the value to reach',
required=True),
'action_id': fields.many2one('ir.actions.act_window', string="Action",
help="The action that will be called to update the goal value."),
'res_id_field': fields.char("ID Field of user",
help="The field name on the user profile (res.users) containing the value for res_id for action."),
}
_defaults = {
'condition': 'higher',
'computation_mode': 'manually',
'domain': "[]",
'monetary': False,
'display_mode': 'progress',
}
def number_following(self, cr, uid, model_name="mail.thread", context=None):
"""Return the number of 'model_name' objects the user is following
The model specified in 'model_name' must inherit from mail.thread
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context)
def _check_domain_validity(self, cr, uid, ids, context=None):
    """Validate each definition's search domain by evaluating it as the
    superuser and running a dummy count-search; raise a user-facing error
    when the domain is malformed.

    Only applies to definitions computed from record counts or sums.
    """
    # take admin as should always be present
    superuser = self.pool['res.users'].browse(cr, uid, SUPERUSER_ID, context=context)
    for definition in self.browse(cr, uid, ids, context=context):
        if definition.computation_mode not in ('count', 'sum'):
            continue

        obj = self.pool[definition.model_id.model]
        try:
            # the stored domain may reference 'user' (browse record)
            domain = safe_eval(definition.domain, {'user': superuser})

            # dummy search to make sure the domain is valid
            obj.search(cr, uid, domain, context=context, count=True)
        except (ValueError, SyntaxError), e:  # Python 2 except syntax
            msg = e.message or (e.msg + '\n' + e.text)
            raise osv.except_osv(_('Error!'),_("The domain for the definition %s seems incorrect, please check it.\n\n%s" % (definition.name, msg)))
    return True
def create(self, cr, uid, vals, context=None):
res_id = super(gamification_goal_definition, self).create(cr, uid, vals, context=context)
if vals.get('computation_mode') in ('count', 'sum'):
self._check_domain_validity(cr, uid, [res_id], context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
res = super(gamification_goal_definition, self).write(cr, uid, ids, vals, context=context)
if vals.get('computation_mode', 'count') in ('count', 'sum') and (vals.get('domain') or vals.get('model_id')):
self._check_domain_validity(cr, uid, ids, context=context)
return res
def on_change_model_id(self, cr, uid, ids, model_id, context=None):
"""Prefill field model_inherited_model_ids"""
if not model_id:
return {'value': {'model_inherited_model_ids': []}}
model = self.pool['ir.model'].browse(cr, uid, model_id, context=context)
# format (6, 0, []) to construct the domain ('model_id', 'in', m and m[0] and m[0][2])
return {'value': {'model_inherited_model_ids': [(6, 0, [m.id for m in model.inherited_model_ids])]}}
class gamification_goal(osv.Model):
"""Goal instance for a user
An individual goal for a user on a specified time period"""
_name = 'gamification.goal'
_description = 'Gamification goal instance'
def _get_completion(self, cr, uid, ids, field_name, arg, context=None):
    """Return the percentage of completeness of the goal, between 0 and 100"""
    res = dict.fromkeys(ids, 0.0)
    for goal in self.browse(cr, uid, ids, context=context):
        if goal.definition_condition == 'higher':
            if goal.current >= goal.target_goal:
                # capped at 100% even when the target is exceeded
                res[goal.id] = 100.0
            else:
                # NOTE(review): divides by target_goal -- a zero target
                # with condition 'higher' would raise ZeroDivisionError
                res[goal.id] = round(100.0 * goal.current / goal.target_goal, 2)
        elif goal.current < goal.target_goal:
            # a goal 'lower than' has only two values possible: 0 or 100%
            res[goal.id] = 100.0
        else:
            res[goal.id] = 0.0
    return res
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
return {'value': {'computation_mode': goal_definition.computation_mode, 'definition_condition': goal_definition.condition}}
_columns = {
'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"),
'user_id': fields.many2one('res.users', string='User', required=True, auto_join=True, ondelete="cascade"),
'line_id': fields.many2one('gamification.challenge.line', string='Challenge Line', ondelete="cascade"),
'challenge_id': fields.related('line_id', 'challenge_id',
string="Challenge",
type='many2one',
relation='gamification.challenge',
store=True, readonly=True,
help="Challenge that generated the goal, assign challenge to users to generate goals with a value in this field."),
'start_date': fields.date('Start Date'),
'end_date': fields.date('End Date'), # no start and end = always active
'target_goal': fields.float('To Reach',
required=True,
track_visibility='always'), # no goal = global index
'current': fields.float('Current Value', required=True, track_visibility='always'),
'completeness': fields.function(_get_completion, type='float', string='Completeness'),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In progress'),
('reached', 'Reached'),
('failed', 'Failed'),
('canceled', 'Canceled'),
],
string='State',
required=True,
track_visibility='always'),
'to_update': fields.boolean('To update'),
'closed': fields.boolean('Closed goal', help="These goals will not be recomputed."),
'computation_mode': fields.related('definition_id', 'computation_mode', type='char', string="Computation mode"),
'remind_update_delay': fields.integer('Remind delay',
help="The number of days after which the user assigned to a manual goal will be reminded. Never reminded if no value is specified."),
'last_update': fields.date('Last Update',
help="In case of manual goal, reminders are sent if the goal as not been updated for a while (defined in challenge). Ignored in case of non-manual goal or goal not linked to a challenge."),
'definition_description': fields.related('definition_id', 'description', type='char', string='Definition Description', readonly=True),
'definition_condition': fields.related('definition_id', 'condition', type='char', string='Definition Condition', readonly=True),
'definition_suffix': fields.related('definition_id', 'full_suffix', type="char", string="Suffix", readonly=True),
'definition_display': fields.related('definition_id', 'display_mode', type="char", string="Display Mode", readonly=True),
}
_defaults = {
'current': 0,
'state': 'draft',
'start_date': fields.date.today,
}
_order = 'start_date desc, end_date desc, definition_id, id'
    def _check_remind_delay(self, cr, uid, goal, context=None):
        """Verify if a goal has not been updated for some time and send a
        reminder message if needed.

        :param goal: browse record of the goal to check
        :return: data to write on the goal object ({} when no reminder is due)
        """
        if goal.remind_update_delay and goal.last_update:
            delta_max = timedelta(days=goal.remind_update_delay)
            # last_update is stored as a string; parse with the server date format
            last_update = datetime.strptime(goal.last_update, DF).date()
            if date.today() - last_update > delta_max:
                # generate a remind report
                temp_obj = self.pool.get('email.template')
                template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_goal_reminder', context)
                body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.goal', goal.id, context=context)
                # NOTE(review): message_post is called with res_id 0 (no document
                # thread); the reminder reaches the user through partner_ids only —
                # confirm this is intentional rather than a missing thread id.
                self.pool['mail.thread'].message_post(cr, uid, 0, body=body_html, partner_ids=[goal.user_id.partner_id.id], context=context, subtype='mail.mt_comment')
                return {'to_update': True}
        return {}
def _get_write_values(self, cr, uid, goal, new_value, context=None):
"""Generate values to write after recomputation of a goal score"""
if new_value == goal.current:
# avoid useless write if the new value is the same as the old one
return {}
result = {goal.id: {'current': new_value}}
if (goal.definition_id.condition == 'higher' and new_value >= goal.target_goal) \
or (goal.definition_id.condition == 'lower' and new_value <= goal.target_goal):
# success, do no set closed as can still change
result[goal.id]['state'] = 'reached'
elif goal.end_date and fields.date.today() > goal.end_date:
# check goal failure
result[goal.id]['state'] = 'failed'
result[goal.id]['closed'] = True
return result
    def update(self, cr, uid, ids, context=None):
        """Update the goals to recompute values and change of states.

        If a manual goal is not updated for enough time, the user will be
        reminded to do so (done only once, in 'inprogress' state).
        If a goal reaches the target value, the status is set to reached.
        If the end date is passed (at least +1 day, time not considered) without
        the target value being reached, the goal is set as failed.
        """
        if context is None:
            context = {}
        commit = context.get('commit_gamification', False)

        # group goals by definition so batch-able computations can be shared
        goals_by_definition = {}
        for goal in self.browse(cr, uid, ids, context=context):
            goals_by_definition.setdefault(goal.definition_id, []).append(goal)

        for definition, goals in goals_by_definition.items():
            goals_to_write = dict((goal.id, {}) for goal in goals)
            if definition.computation_mode == 'manually':
                # manual goals are never recomputed here, only reminded
                for goal in goals:
                    goals_to_write[goal.id].update(self._check_remind_delay(cr, uid, goal, context))
            elif definition.computation_mode == 'python':
                # TODO batch execution
                for goal in goals:
                    # execute the chosen method
                    cxt = {
                        'self': self.pool.get('gamification.goal'),
                        'object': goal,
                        'pool': self.pool,
                        'cr': cr,
                        'context': dict(context),  # copy context to prevent side-effects of eval
                        'uid': uid,
                        'date': date, 'datetime': datetime, 'timedelta': timedelta, 'time': time
                    }
                    code = definition.compute_code.strip()
                    safe_eval(code, cxt, mode="exec", nocopy=True)
                    # the result of the evaluated code is put in the 'result'
                    # local variable, propagated to the context
                    result = cxt.get('result')
                    if result is not None and type(result) in (float, int, long):
                        goals_to_write.update(
                            self._get_write_values(cr, uid, goal, result, context=context)
                        )
                    else:
                        _logger.exception(_('Invalid return content from the evaluation of code for definition %s') % definition.name)

            else:  # count or sum
                obj = self.pool.get(definition.model_id.model)
                field_date_name = definition.field_date_id and definition.field_date_id.name or False

                if definition.computation_mode == 'count' and definition.batch_mode:
                    # batch mode, trying to do as much as possible in one request
                    general_domain = safe_eval(definition.domain)
                    field_name = definition.batch_distinctive_field.name
                    # one subquery per (start, end) period; maps goal id to the
                    # evaluated per-user distinctive value
                    subqueries = {}
                    for goal in goals:
                        start_date = field_date_name and goal.start_date or False
                        end_date = field_date_name and goal.end_date or False
                        subqueries.setdefault((start_date, end_date), {}).update({goal.id:safe_eval(definition.batch_user_expression, {'user': goal.user_id})})

                    # the global query should be split by time periods (especially for recurrent goals)
                    for (start_date, end_date), query_goals in subqueries.items():
                        subquery_domain = list(general_domain)
                        subquery_domain.append((field_name, 'in', list(set(query_goals.values()))))
                        if start_date:
                            subquery_domain.append((field_date_name, '>=', start_date))
                        if end_date:
                            subquery_domain.append((field_date_name, '<=', end_date))

                        if field_name == 'id':
                            # grouping on id does not work and is similar to search anyway
                            user_ids = obj.search(cr, uid, subquery_domain, context=context)
                            user_values = [{'id': user_id, 'id_count': 1} for user_id in user_ids]
                        else:
                            user_values = obj.read_group(cr, uid, subquery_domain, fields=[field_name], groupby=[field_name], context=context)
                        # user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...]
                        for goal in [g for g in goals if g.id in query_goals.keys()]:
                            for user_value in user_values:
                                queried_value = field_name in user_value and user_value[field_name] or False
                                # read_group may return (id, name) pairs for m2o fields
                                if isinstance(queried_value, tuple) and len(queried_value) == 2 and isinstance(queried_value[0], (int, long)):
                                    queried_value = queried_value[0]
                                if queried_value == query_goals[goal.id]:
                                    new_value = user_value.get(field_name+'_count', goal.current)
                                    goals_to_write.update(
                                        self._get_write_values(cr, uid, goal, new_value, context=context)
                                    )

                else:
                    for goal in goals:
                        # eval the domain with user replaced by goal user object
                        domain = safe_eval(definition.domain, {'user': goal.user_id})

                        # add temporal clause(s) to the domain if fields are filled on the goal
                        if goal.start_date and field_date_name:
                            domain.append((field_date_name, '>=', goal.start_date))
                        if goal.end_date and field_date_name:
                            domain.append((field_date_name, '<=', goal.end_date))

                        if definition.computation_mode == 'sum':
                            field_name = definition.field_id.name
                            # TODO for master: group on user field in batch mode
                            res = obj.read_group(cr, uid, domain, [field_name], [], context=context)
                            new_value = res and res[0][field_name] or 0.0
                        else:  # computation mode = count
                            new_value = obj.search(cr, uid, domain, context=context, count=True)

                        goals_to_write.update(
                            self._get_write_values(cr, uid, goal, new_value, context=context)
                        )

            # flush the accumulated per-goal values for this definition
            for goal_id, value in goals_to_write.items():
                if not value:
                    continue
                self.write(cr, uid, [goal_id], value, context=context)
            if commit:
                cr.commit()
        return True
    def action_start(self, cr, uid, ids, context=None):
        """Mark a goal as started.

        This should only be used when creating goals manually (in draft state).
        """
        self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
        # recompute immediately so the goal starts with an up-to-date value
        return self.update(cr, uid, ids, context=context)

    def action_reach(self, cr, uid, ids, context=None):
        """Mark a goal as reached.

        If the target goal condition is not met, the state will be reset to In
        Progress at the next goal update until the end date.
        """
        return self.write(cr, uid, ids, {'state': 'reached'}, context=context)

    def action_fail(self, cr, uid, ids, context=None):
        """Set the state of the goal to failed.

        A failed goal will be ignored in future checks.
        """
        return self.write(cr, uid, ids, {'state': 'failed'}, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """Reset the completion after setting a goal as reached or failed.

        This is only the current state; if the date and/or target criteria
        match the conditions for a change of state, this will be applied at the
        next goal update.
        """
        return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add a 'no_remind_goal' field to True"""
context = dict(context or {})
context['no_remind_goal'] = True
return super(gamification_goal, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
"""Overwrite the write method to update the last_update field to today
If the current value is changed and the report frequency is set to On
change, a report is generated
"""
if context is None:
context = {}
vals['last_update'] = fields.date.today()
result = super(gamification_goal, self).write(cr, uid, ids, vals, context=context)
for goal in self.browse(cr, uid, ids, context=context):
if goal.state != "draft" and ('definition_id' in vals or 'user_id' in vals):
# avoid drag&drop in kanban view
raise osv.except_osv(_('Error!'), _('Can not modify the configuration of a started goal'))
if vals.get('current'):
if 'no_remind_goal' in context:
# new goals should not be reported
continue
if goal.challenge_id and goal.challenge_id.report_message_frequency == 'onchange':
self.pool.get('gamification.challenge').report_progress(cr, SUPERUSER_ID, goal.challenge_id, users=[goal.user_id], context=context)
return result
    def get_action(self, cr, uid, goal_id, context=None):
        """Get the ir.action related to update the goal.

        In case of a manual goal, should return a wizard to update the value.

        :return: action description in a dictionary, or False when the goal
            has no related action and is not manually updatable
        """
        goal = self.browse(cr, uid, goal_id, context=context)
        if goal.definition_id.action_id:
            # open the action linked to the goal
            action = goal.definition_id.action_id.read()[0]

            if goal.definition_id.res_id_field:
                current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
                # evaluate the python expression stored on the definition to
                # find which record the action should open
                action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user})

                # if one element to display, should see it in form mode if possible
                action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views']
            return action
        if goal.computation_mode == 'manually':
            # open a wizard window to update the value manually
            action = {
                'name': _("Update %s") % goal.definition_id.name,
                'id': goal_id,
                'type': 'ir.actions.act_window',
                'views': [[False, 'form']],
                'target': 'new',
                'context': {'default_goal_id': goal_id, 'default_current': goal.current},
                'res_model': 'gamification.goal.wizard'
            }
            return action
        return False
| agpl-3.0 |
noroutine/ansible | lib/ansible/modules/cloud/ovirt/ovirt_affinity_group.py | 50 | 11924 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_group
short_description: Module to manage affinity groups in oVirt/RHV
version_added: "2.3"
author:
- Ondra Machacek (@machacekondra)
description:
- "This module manage affinity groups in oVirt/RHV. It can also manage assignments
of those groups to VMs."
options:
name:
description:
- Name of the affinity group to manage.
required: true
state:
description:
- Should the affinity group be present or absent.
choices: [ absent, present ]
default: present
cluster:
description:
- Name of the cluster of the affinity group.
description:
description:
- Description of the affinity group.
host_enforcing:
description:
- If I(yes) VM cannot start on host if it does not satisfy the C(host_rule).
- This parameter is support since oVirt/RHV 4.1 version.
type: bool
host_rule:
description:
- If I(positive) I(all) VMs in this group should run on the this host.
- If I(negative) I(no) VMs in this group should run on the this host.
- This parameter is support since oVirt/RHV 4.1 version.
choices: [ negative, positive ]
vm_enforcing:
description:
- If I(yes) VM cannot start if it does not satisfy the C(vm_rule).
type: bool
vm_rule:
description:
- If I(positive) I(all) VMs in this group should run on the host defined by C(host_rule).
- If I(negative) I(no) VMs in this group should run on the host defined by C(host_rule).
- If I(disabled) this affinity group doesn't take effect.
choices: [ disabled, negative, positive ]
vms:
description:
- List of the VMs names, which should have assigned this affinity group.
hosts:
description:
- List of the hosts names, which should have assigned this affinity group.
- This parameter is support since oVirt/RHV 4.1 version.
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Create(if not exists) and assign affinity group to VMs vm1 and vm2 and host host1
ovirt_affinity_group:
name: mygroup
cluster: mycluster
vm_enforcing: true
vm_rule: positive
host_enforcing: true
host_rule: positive
vms:
- vm1
- vm2
hosts:
- host1
- name: Detach VMs from affinity group and disable VM rule
ovirt_affinity_group:
name: mygroup
cluster: mycluster
vm_enforcing: false
vm_rule: disabled
host_enforcing: true
host_rule: positive
vms: []
hosts:
- host1
- host2
- name: Remove affinity group
ovirt_affinity_group:
state: absent
cluster: mycluster
name: mygroup
'''
RETURN = '''
id:
description: ID of the affinity group which is managed
returned: On success if affinity group is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_group:
description: "Dictionary of all the affinity group attributes. Affinity group attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_group."
returned: On success if affinity group is found.
type: str
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_support,
create_connection,
get_id_by_name,
equal,
engine_supported,
ovirt_full_argument_spec,
search_by_name,
)
class AffinityGroupsModule(BaseModule):
    """Drive create/update/remove of an oVirt affinity group, including the
    synchronisation of its VM and host assignments."""

    def __init__(self, vm_ids, host_ids, *args, **kwargs):
        """
        :param vm_ids: sorted list of VM ids to assign (None = leave untouched)
        :param host_ids: sorted list of host ids to assign (None = leave untouched)
        """
        super(AffinityGroupsModule, self).__init__(*args, **kwargs)
        self._vm_ids = vm_ids
        self._host_ids = host_ids

    def update_vms(self, affinity_group):
        """
        This method iterates over the affinity group VM assignments, detaches
        the VMs which should not be attached to the affinity group and attaches
        the VMs which should be attached.
        """
        assigned_vms = self.assigned_vms(affinity_group)
        to_remove = [vm for vm in assigned_vms if vm not in self._vm_ids]
        to_add = [vm for vm in self._vm_ids if vm not in assigned_vms]
        ag_service = self._service.group_service(affinity_group.id)
        for vm in to_remove:
            ag_service.vms_service().vm_service(vm).remove()
        for vm in to_add:
            # API return <action> element instead of VM element, so we
            # need to WA this issue, for oVirt/RHV versions having this bug:
            try:
                ag_service.vms_service().add(otypes.Vm(id=vm))
            except ValueError as ex:
                if 'complete' not in str(ex):
                    raise ex

    def post_create(self, affinity_group):
        # after creation, push the requested VM assignments
        self.update_vms(affinity_group)

    def post_update(self, affinity_group):
        # after an update, re-synchronise the VM assignments
        self.update_vms(affinity_group)

    def build_entity(self):
        """Build the otypes.AffinityGroup entity from the module parameters."""
        affinity_group = otypes.AffinityGroup(
            name=self._module.params['name'],
            description=self._module.params['description'],
            positive=(
                self._module.params['vm_rule'] == 'positive'
            ) if self._module.params['vm_rule'] is not None else None,
            enforcing=(
                self._module.params['vm_enforcing']
            ) if self._module.params['vm_enforcing'] is not None else None,
        )

        # The following attributes are supported since oVirt/RHV 4.1 only:
        if not engine_supported(self._connection, '4.1'):
            return affinity_group

        affinity_group.hosts_rule = otypes.AffinityRule(
            positive=(
                self.param('host_rule') == 'positive'
            ) if self.param('host_rule') is not None else None,
            enforcing=self.param('host_enforcing'),
        ) if (
            self.param('host_enforcing') is not None or
            self.param('host_rule') is not None
        ) else None

        affinity_group.vms_rule = otypes.AffinityRule(
            positive=(
                self.param('vm_rule') == 'positive'
            ) if self.param('vm_rule') is not None else None,
            enforcing=self.param('vm_enforcing'),
            enabled=(
                self.param('vm_rule') in ['negative', 'positive']
            ) if self.param('vm_rule') is not None else None,
        ) if (
            self.param('vm_enforcing') is not None or
            self.param('vm_rule') is not None
        ) else None

        affinity_group.hosts = [
            otypes.Host(id=host_id) for host_id in self._host_ids
        ] if self._host_ids is not None else None

        return affinity_group

    def assigned_vms(self, affinity_group):
        """Return the sorted ids of the VMs currently in the group."""
        if getattr(affinity_group.vms, 'href', None):
            # vms is a link: follow it to fetch the actual VM list
            return sorted([
                vm.id for vm in self._connection.follow_link(affinity_group.vms)
            ])
        else:
            return sorted([vm.id for vm in affinity_group.vms])

    def update_check(self, entity):
        """Return True when *entity* already matches the requested parameters
        (i.e. no update call is needed)."""
        assigned_vms = self.assigned_vms(entity)
        do_update = (
            equal(self.param('description'), entity.description) and equal(self.param('vm_enforcing'), entity.enforcing) and equal(
                self.param('vm_rule') == 'positive' if self.param('vm_rule') else None,
                entity.positive
            ) and equal(self._vm_ids, assigned_vms)
        )
        # The following attributes are supported since 4.1,
        # so return early if the engine doesn't have them:
        if not engine_supported(self._connection, '4.1'):
            return do_update

        # Following is supported since 4.1:
        return do_update and (
            equal(
                self.param('host_rule') == 'positive' if self.param('host_rule') else None,
                entity.hosts_rule.positive) and equal(self.param('host_enforcing'), entity.hosts_rule.enforcing) and equal(
                self.param('vm_rule') in ['negative', 'positive'] if self.param('vm_rule') else None,
                entity.vms_rule.enabled) and equal(self._host_ids, sorted([host.id for host in entity.hosts]))
        )
def main():
    """Module entry point: create/update or remove the affinity group."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        cluster=dict(type='str', required=True),
        name=dict(type='str', required=True),
        description=dict(type='str'),
        vm_enforcing=dict(type='bool'),
        vm_rule=dict(type='str', choices=['disabled', 'negative', 'positive']),
        host_enforcing=dict(type='bool'),
        host_rule=dict(type='str', choices=['negative', 'positive']),
        vms=dict(type='list'),
        hosts=dict(type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if module._name == 'ovirt_affinity_groups':
        module.deprecate("The 'ovirt_affinity_groups' module is being renamed 'ovirt_affinity_group'", version=2.8)

    check_sdk(module)

    # BUG FIX: bind these before the try block, otherwise the finally clause
    # raised NameError (masking the real error) whenever the auth pop or
    # create_connection() itself failed.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        # Check if unsupported parameters were passed:
        supported_41 = ('host_enforcing', 'host_rule', 'hosts')
        if not check_support(
            version='4.1',
            connection=connection,
            module=module,
            params=supported_41,
        ):
            module.fail_json(
                msg='Following parameters are supported since 4.1: {params}'.format(
                    params=supported_41,
                )
            )
        clusters_service = connection.system_service().clusters_service()
        vms_service = connection.system_service().vms_service()
        hosts_service = connection.system_service().hosts_service()
        cluster_name = module.params['cluster']
        cluster = search_by_name(clusters_service, cluster_name)
        if cluster is None:
            raise Exception("Cluster '%s' was not found." % cluster_name)
        cluster_service = clusters_service.cluster_service(cluster.id)
        affinity_groups_service = cluster_service.affinity_groups_service()

        # Fetch VM ids which should be assigned to affinity group:
        vm_ids = sorted([
            get_id_by_name(vms_service, vm_name)
            for vm_name in module.params['vms']
        ]) if module.params['vms'] is not None else None
        # Fetch host ids which should be assigned to affinity group:
        host_ids = sorted([
            get_id_by_name(hosts_service, host_name)
            for host_name in module.params['hosts']
        ]) if module.params['hosts'] is not None else None
        affinity_groups_module = AffinityGroupsModule(
            connection=connection,
            module=module,
            service=affinity_groups_service,
            vm_ids=vm_ids,
            host_ids=host_ids,
        )
        state = module.params['state']
        if state == 'present':
            ret = affinity_groups_module.create()
        elif state == 'absent':
            ret = affinity_groups_module.remove()
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close what was actually opened; log out unless the caller
        # supplied a pre-acquired token.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
| gpl-3.0 |
datalogics-robb/scons | test/Fortran/USE-MODULE.py | 2 | 2622 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import string
import sys
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write('myfortran.py', r"""
import os.path
import re
import string
import sys
mod_regex = "(?im)^\\s*MODULE\\s+(?!PROCEDURE)(\\w+)"
contents = open(sys.argv[1]).read()
modules = re.findall(mod_regex, contents)
modules = map(lambda m: string.lower(m)+'.mod', modules)
for t in sys.argv[2:] + modules:
open(t, 'wb').write('myfortran.py wrote %s\n' % os.path.split(t)[1])
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(FORTRANCOM = r'%(_python_)s myfortran.py $SOURCE $TARGET')
env.Object(target = 'test1.obj', source = 'test1.f')
""" % locals())
test.write('test1.f', """\
PROGRAM TEST
USE MOD_FOO
USE MOD_BAR
PRINT *,'TEST.f'
CALL P
STOP
END
MODULE MOD_FOO
IMPLICIT NONE
CONTAINS
SUBROUTINE P
PRINT *,'mod_foo'
END SUBROUTINE P
END MODULE MOD_FOO
MODULE PROCEDURE MOD_BAR
IMPLICIT NONE
CONTAINS
SUBROUTINE P
PRINT *,'mod_bar'
END SUBROUTINE P
END MODULE MOD_BAR
""")
test.run(arguments = '.', stderr = None)
test.must_match('test1.obj', "myfortran.py wrote test1.obj\n")
test.must_match('mod_foo.mod', "myfortran.py wrote mod_foo.mod\n")
test.must_not_exist('mod_bar.mod')
test.up_to_date(arguments = '.')
test.pass_test()
| mit |
swapnakrishnan2k/tp-qemu | qemu/tests/macvtap_event_notification.py | 6 | 4152 | import logging
import time
from autotest.client.shared import error, utils
from virttest import utils_misc
from virttest import utils_net
from virttest import env_process
@error.context_aware
def run(test, params, env):
    """
    Test qmp event notification function:

    1) Boot up guest with qmp and macvtap.
    2) In guest, change network interface to promisc state.
    3) Try to catch qmp event notification in qmp monitor.
    4) Execute query-rx-filter in host qmp session.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    qemu_binary = utils_misc.get_qemu_binary(params)
    if not utils_misc.qemu_has_option("qmp", qemu_binary):
        # BUG FIX: the exception was instantiated but never raised, so the
        # capability check silently did nothing
        raise error.TestNAError("This test case requires a host QEMU with QMP "
                                "monitor support")
    if params.get("nettype", "macvtap") != "macvtap":
        # BUG FIX: same here — TestNAError was created but not raised
        raise error.TestNAError("This test case only tests macvtap.")

    params["start_vm"] = "yes"
    vm_name = params.get("main_vm", "vm1")
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    event_cmd = params.get("event_cmd")
    event_cmd_type = params.get("event_cmd_type")
    event_check = params.get("event_check")
    timeout = int(params.get("check_timeout", 360))
    pre_cmd = params.get("pre_cmd")
    post_cmd = params.get("post_cmd")
    post_cmd_type = params.get("post_cmd_type")
    session = vm.wait_for_serial_login(timeout=int(params.get("login_timeout",
                                                              360)))

    # dispatch table: where each kind of command is executed
    callback = {"host_cmd": utils.system_output,
                "guest_cmd": session.get_command_output,
                "qmp_cmd": vm.get_monitors_by_type("qmp")[0].send_args_cmd}

    def send_cmd(cmd, cmd_type):
        """Run cmd through the executor registered for cmd_type."""
        if cmd_type in callback:
            return callback[cmd_type](cmd)
        raise error.TestError("cmd_type is not supported")

    # ensure the serial session is closed on every exit path (the original
    # leaked it when TestFail was raised below)
    try:
        if pre_cmd:
            # BUG FIX: the '%s' placeholder was never interpolated
            error.context("Run pre_cmd '%s'" % pre_cmd, logging.info)
            pre_cmd_type = params.get("pre_cmd_type", event_cmd_type)
            send_cmd(pre_cmd, pre_cmd_type)

        mac = vm.get_mac_address()
        interface_name = utils_net.get_linux_ifname(session, mac)
        error.context("In guest, change network interface to promisc state.",
                      logging.info)
        event_cmd = params.get("event_cmd") % interface_name
        send_cmd(event_cmd, event_cmd_type)

        error.context("Try to get qmp events in %s seconds!" % timeout,
                      logging.info)
        end_time = time.time() + timeout
        qmp_monitors = vm.get_monitors_by_type("qmp")
        qmp_num = len(qmp_monitors)
        while time.time() < end_time:
            # iterate over a copy: the original removed elements from the list
            # being iterated, which skips the following monitor for one round
            for monitor in qmp_monitors[:]:
                event = monitor.get_event(event_check)
                if event:
                    txt = "Monitor %s " % monitor.name
                    txt += "received qmp %s event notification" % event_check
                    logging.info(txt)
                    qmp_num -= 1
                    qmp_monitors.remove(monitor)
            time.sleep(5)
            if qmp_num <= 0:
                break

        if qmp_num > 0:
            output = session.cmd("ip link show")
            err = "Monitor(s) "
            for monitor in qmp_monitors:
                err += "%s " % monitor.name
            err += " did not receive qmp %s event notification." % event_check
            err += " ip link show command output in guest: %s" % output
            raise error.TestFail(err)

        if post_cmd:
            for nic in vm.virtnet:
                post_cmd = post_cmd % nic.device_id
            error.context("Run post_cmd '%s'" % post_cmd, logging.info)
            post_cmd_type = params.get("post_cmd_type", event_cmd_type)
            output = send_cmd(post_cmd, post_cmd_type)
            post_cmd_check = params.get("post_cmd_check")
            if post_cmd_check:
                if post_cmd_check not in str(output):
                    err = "Did not find '%s' in " % post_cmd_check
                    err += "'%s' command's output: %s" % (post_cmd, output)
                    raise error.TestFail(err)
    finally:
        session.close()
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.