repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
joshuajan/odoo | addons/account_followup/report/account_followup_print.py | 40 | 5707 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from collections import defaultdict

from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools.translate import _
class report_rappel(report_sxw.rml_parse):
    """RML parser for the payment follow-up (reminder) report.

    Exposes helpers to the report template: browsing of
    ``account_followup.stat.by.partner`` records, retrieval of the overdue
    receivable move lines grouped by currency, and the follow-up text
    applicable to a partner.
    """
    _name = "account_followup.report.rappel"

    def __init__(self, cr, uid, name, context=None):
        super(report_rappel, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'ids_to_objects': self._ids_to_objects,
            'getLines': self._lines_get,
            'get_text': self._get_text,
        })

    def _ids_to_objects(self, ids):
        """Browse the given ids, dropping duplicates while preserving order.

        :param ids: ids of account_followup.stat.by.partner records
        :return: list of unique browse records
        """
        all_lines = []
        for line in self.pool['account_followup.stat.by.partner'].browse(self.cr, self.uid, ids):
            if line not in all_lines:
                all_lines.append(line)
        return all_lines

    def _lines_get(self, stat_by_partner_line):
        """Return the overdue move lines for the partner of a stat line."""
        return self._lines_get_with_partner(stat_by_partner_line.partner_id,
                                            stat_by_partner_line.company_id.id)

    def _lines_get_with_partner(self, partner, company_id):
        """Collect the unreconciled receivable move lines of *partner* for
        *company_id*, grouped by currency.

        :return: list of {'line': [line_data, ...]} dicts, one entry per currency
        """
        moveline_obj = self.pool['account.move.line']
        moveline_ids = moveline_obj.search(self.cr, self.uid, [
            ('partner_id', '=', partner.id),
            ('account_id.type', '=', 'receivable'),
            ('reconcile_id', '=', False),
            ('state', '!=', 'draft'),
            ('company_id', '=', company_id),
        ])

        # lines_per_currency = {currency: [line data, ...], ...}
        lines_per_currency = defaultdict(list)
        for line in moveline_obj.browse(self.cr, self.uid, moveline_ids):
            currency = line.currency_id or line.company_id.currency_id
            line_data = {
                'name': line.move_id.name,
                'ref': line.ref,
                'date': line.date,
                'date_maturity': line.date_maturity,
                # Use the foreign-currency amount only when the line is not
                # expressed in the company currency.
                'balance': line.amount_currency if currency != line.company_id.currency_id else line.debit - line.credit,
                'blocked': line.blocked,
                'currency_id': currency,
            }
            lines_per_currency[currency].append(line_data)
        return [{'line': lines} for lines in lines_per_currency.values()]

    def _get_text(self, stat_line, followup_id, context=None):
        """Return the follow-up text for the partner of *stat_line*, with the
        %(...)s placeholders filled in.

        The text is the description of the highest follow-up level already
        reached by the partner's move lines; if none is available, the first
        follow-up line of the plan that has a description is used.
        :raise osv.except_osv: if the follow-up plan has no follow-up line
        """
        # Work on a copy: the incoming dict belongs to the caller and must not
        # be mutated (the original code updated it in place).
        context = dict(context or {}, lang=stat_line.partner_id.lang)
        fp_obj = self.pool['account_followup.followup']
        fp_line = fp_obj.browse(self.cr, self.uid, followup_id, context=context).followup_line
        if not fp_line:
            raise osv.except_osv(_('Error!'),
                                 _("The followup plan defined for the current company does not have any followup action."))
        # The default text is the description of the first follow-up line in
        # the sequence that has one.
        default_text = ''
        for line in fp_line:
            if line.description:
                default_text = line.description
                break
        # Look into the lines of the partner that already have a followup level,
        # and take the description of the highest level for which it is available.
        partner_line_ids = self.pool['account.move.line'].search(
            self.cr, self.uid,
            [('partner_id', '=', stat_line.partner_id.id),
             ('reconcile_id', '=', False),
             ('company_id', '=', stat_line.company_id.id),
             ('blocked', '=', False),
             ('state', '!=', 'draft'),
             ('debit', '!=', False),
             ('account_id.type', '=', 'receivable'),
             ('followup_line_id', '!=', False)])
        partner_max_delay = 0
        partner_max_text = ''
        for move_line in self.pool['account.move.line'].browse(self.cr, self.uid, partner_line_ids, context=context):
            if move_line.followup_line_id.delay > partner_max_delay and move_line.followup_line_id.description:
                partner_max_delay = move_line.followup_line_id.delay
                partner_max_text = move_line.followup_line_id.description
        text = partner_max_delay and partner_max_text or default_text
        if text:
            text = text % {
                'partner_name': stat_line.partner_id.name,
                'date': time.strftime('%Y-%m-%d'),
                'company_name': stat_line.company_id.name,
                'user_signature': self.pool['res.users'].browse(self.cr, self.uid, self.uid, context).signature or '',
            }
        return text
class report_followup(osv.AbstractModel):
    """Registers the legacy ``report_rappel`` parser as the rendering class
    for the ``account_followup.report_followup`` template (via the
    ``report.abstract_report`` bridge)."""
    _name = 'report.account_followup.report_followup'
    _inherit = 'report.abstract_report'
    _template = 'account_followup.report_followup'
    _wrapped_report_class = report_rappel
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kushan02/Kushan_Kernel_I9505 | scripts/build-all.py | 133 | 9588 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'

# Kernel output trees and logs are written here, one subdirectory per target.
build_dir = '../all-kernels'
# Default make goals; may be replaced by --oldconfig / --make-target in main().
make_command = ["vmlinux", "modules"]
# NOTE(review): this is a *reference* to os.environ, not a copy -- the
# update() below mutates the environment of this whole process. Use
# dict(os.environ) if isolation is ever needed.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
        'KCONFIG_NOTIMESTAMP': 'true' })
# Parsed command-line options; replaced by the optparse Values object in main().
all_options = {}
def error(msg):
    """Report a non-fatal error message on stderr (does not exit)."""
    formatted = "error: %s\n" % msg
    sys.stderr.write(formatted)
def fail(msg):
    """Report *msg* via error() and terminate with exit status 1."""
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    looks_like_msm_kernel = (os.path.isfile('MAINTAINERS') and
                             os.path.isfile('arch/arm/mach-msm/Kconfig'))
    if not looks_like_msm_kernel:
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present.

    The original code caught OSError and compared ``exc.errno`` against
    ``errno.EEXIST``, but ``errno`` was never imported, so any makedirs
    failure raised NameError instead of being handled. Re-checking with
    os.path.isdir covers the same race (another process creating the
    directory between the check and makedirs) without needing errno.
    """
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError:
            # Only swallow the error if the directory now exists
            # (concurrent creation); otherwise propagate it.
            if not os.path.isdir(build_dir):
                raise
def update_config(filename, option):
    """Append a single config option line to the defconfig *filename*.

    :param filename: path of the defconfig file to modify (was ``file``,
        renamed to stop shadowing the builtin; all callers pass positionally)
    :param option: config line to append, e.g. ``CONFIG_USE_THING=y``
        (was ``str``, same reason)
    """
    # Parenthesized single-argument print: identical output as a Python 2
    # print statement, and also valid Python 3.
    print('Updating %s with \'%s\'\n' % (filename, option))
    # 'with' guarantees the handle is closed even if the write fails.
    with open(filename, 'a') as defconfig:
        defconfig.write(option + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    # One glob pattern per supported target family (msm/fsm, qsd, apq);
    # the key is the file name with the trailing '_defconfig' stripped.
    patterns = (
        'arch/arm/configs/[fm]sm[0-9-]*_defconfig',
        'arch/arm/configs/qsd*_defconfig',
        'arch/arm/configs/apq*_defconfig',
    )
    names = {}
    for pattern in patterns:
        for path in glob.glob(pattern):
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs one build command, logging its complete output to a file while
    echoing it to stdout (verbose mode) or printing a dot-per-line progress
    indicator (quiet mode)."""
    def __init__(self, logname):
        # Log file receiving the full, unfiltered build output.
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        """Run *args* as a subprocess and return its exit status."""
        # NOTE(review): devnull is never closed in this method -- leaks one
        # file descriptor per run.
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read the raw fd in fixed-size chunks instead of iterating lines so
        # output is forwarded as soon as the child produces it.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # Quiet mode: one dot per newline in the chunk, with a line
                # break after every 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
failed_targets = []
def build(target):
    """Configure and build one defconfig *target* under build_dir.

    Runs 'make <target>_defconfig' into a per-target output directory, then
    (unless --updateconfigs) runs the main build via Builder. On failure the
    target is either recorded in failed_targets (--keep-going) or the whole
    script aborts. With --configs/--updateconfigs the regenerated defconfig
    is copied back into the tree.
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)

    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        'SELINUX_DEFCONFIG=selinux_defconfig',
        'SELINUX_LOG_DEFCONFIG=selinux_log_defconfig',
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()

    if not all_options.updateconfigs:
        # Shadows the module-level build() name locally; harmless but
        # confusing -- 'build' here is a Builder instance.
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))

    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build each target in *targets*; abort at the end if any failed.

    With --updateconfigs, first append the requested option to each
    target's defconfig in the tree.
    """
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # failed_targets is only populated under --keep-going; report them all
    # at once and exit non-zero.
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse the command line and build the requested defconfig targets."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
            "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
            ' '.join(make_command))
    (options, args) = parser.parse_args()
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)

    # --oldconfig replaces the make goals entirely; -m appends custom goals.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        # Only the *perf* variants.
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        # Everything except the *perf* variants.
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        # Explicit target list: validate each name against the scanned configs.
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| gpl-2.0 |
nisse3000/pymatgen | pymatgen/analysis/chemenv/coordination_environments/chemenv_strategies.py | 3 | 98578 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides so-called "strategies" to determine the coordination environments of an atom in a structure.
Some strategies can favour larger or smaller environments. Some strategies uniquely identifies the environments while
some others can identify the environment as a "mix" of several environments, each of which is assigned with a given
fraction. The choice of the strategy depends on the purpose of the user.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import abc
import os
import json
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
from pymatgen.core.sites import PeriodicSite
import numpy as np
from scipy.stats import gmean
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import UNCLEAR_ENVIRONMENT_SYMBOL
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import get_lower_and_upper_f
from pymatgen.analysis.chemenv.utils.func_utils import CSMFiniteRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import CSMInfiniteRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import DeltaCSMRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import RatioFunction
from pymatgen.analysis.chemenv.utils.chemenv_errors import EquivalentSiteSearchError
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from six import with_metaclass
from pymatgen.analysis.chemenv.coordination_environments.voronoi import DetailedVoronoiContainer
from collections import OrderedDict
# Directory containing this module (used for locating data files).
module_dir = os.path.dirname(os.path.abspath(__file__))

# Single shared AllCoordinationGeometries instance. The original code built
# a second, throwaway instance just to obtain the symbol -> coordination
# number mapping; reuse the shared one instead.
ALLCG = AllCoordinationGeometries()
MPSYMBOL_TO_CN = ALLCG.get_symbol_cn_mapping()
class StrategyOption(with_metaclass(abc.ABCMeta, MSONable)):
    """Abstract base class for validated, MSON-serializable strategy options."""
    # Human-readable description of the accepted values; overridden by subclasses.
    allowed_values = None

    @abc.abstractmethod
    def as_dict(self):
        """
        A JSON serializable dict representation of this strategy option.
        """
        pass
class DistanceCutoffFloat(float, StrategyOption):
    """Strategy option holding a validated distance cutoff (a float >= 1.0)."""
    allowed_values = 'Real number between 1.0 and +infinity'

    def __new__(cls, myfloat):
        # Validate before handing the value out as an immutable float subclass.
        value = float.__new__(cls, myfloat)
        if value < 1.0:
            raise ValueError("Distance cutoff should be between 1.0 and +infinity")
        return value

    def as_dict(self):
        """Return the MSONable dict representation of this option."""
        return {
            '@module': self.__class__.__module__,
            '@class': self.__class__.__name__,
            'value': self,
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild (and re-validate) the option from its dict representation."""
        return cls(d['value'])
class AngleCutoffFloat(float, StrategyOption):
    """Strategy option holding a validated angle cutoff (a float in [0.0, 1.0])."""
    allowed_values = 'Real number between 0.0 and 1.0'

    def __new__(cls, myfloat):
        # Validate before handing the value out as an immutable float subclass.
        value = float.__new__(cls, myfloat)
        if value < 0.0 or value > 1.0:
            raise ValueError("Angle cutoff should be between 0.0 and 1.0")
        return value

    def as_dict(self):
        """Return the MSONable dict representation of this option."""
        return {
            '@module': self.__class__.__module__,
            '@class': self.__class__.__name__,
            'value': self,
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild (and re-validate) the option from its dict representation."""
        return cls(d['value'])
class CSMFloat(float, StrategyOption):
    """Strategy option holding a validated continuous symmetry measure limit
    (a float in [0.0, 100.0])."""
    allowed_values = 'Real number between 0.0 and 100.0'

    def __new__(cls, myfloat):
        # Validate before handing the value out as an immutable float subclass.
        value = float.__new__(cls, myfloat)
        if value < 0.0 or value > 100.0:
            raise ValueError("Continuous symmetry measure limits should be between 0.0 and 100.0")
        return value

    def as_dict(self):
        """Return the MSONable dict representation of this option."""
        return {
            '@module': self.__class__.__module__,
            '@class': self.__class__.__name__,
            'value': self,
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild (and re-validate) the option from its dict representation."""
        return cls(d['value'])
class AdditionalConditionInt(int, StrategyOption):
    """Strategy option holding a validated additional-condition code (one of
    the integers in AdditionalConditions.ALL)."""
    allowed_values = 'Integer amongst :\n'
    for integer, description in AdditionalConditions.CONDITION_DESCRIPTION.items():
        allowed_values += ' - {:d} for "{}"\n'.format(integer, description)

    def __new__(cls, integer):
        # Reject anything whose string form is not an exact integer
        # representation (e.g. 2.5 would survive int() but change value).
        if str(int(integer)) != str(integer):
            raise ValueError("Additional condition {} is not an integer".format(str(integer)))
        value = int.__new__(cls, integer)
        if value not in AdditionalConditions.ALL:
            raise ValueError("Additional condition {:d} is not allowed".format(integer))
        return value

    def as_dict(self):
        """Return the MSONable dict representation of this option."""
        return {
            '@module': self.__class__.__module__,
            '@class': self.__class__.__name__,
            'value': self,
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild (and re-validate) the option from its dict representation."""
        return cls(d['value'])
class AbstractChemenvStrategy(with_metaclass(abc.ABCMeta, MSONable)):
    """
    Class used to define a Chemenv strategy for the neighbors and coordination environment to be applied to a
    StructureEnvironments object
    """
    AC = AdditionalConditions()
    # Subclasses populate STRATEGY_OPTIONS with
    # {option_name: {'type': ..., 'internal': ..., 'default': ...}} entries.
    STRATEGY_OPTIONS = OrderedDict()
    STRATEGY_DESCRIPTION = None
    STRATEGY_INFO_FIELDS = []
    DEFAULT_SYMMETRY_MEASURE_TYPE = 'csm_wcs_ctwcc'

    def __init__(self, structure_environments=None, symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE):
        """
        Abstract constructor for the all chemenv strategies.
        :param structure_environments: StructureEnvironments object containing all the information on the
        coordination of the sites in a structure
        """
        self.structure_environments = None
        if structure_environments is not None:
            self.set_structure_environments(structure_environments)
        self._symmetry_measure_type = symmetry_measure_type

    @property
    def symmetry_measure_type(self):
        # Continuous symmetry measure type used by this strategy (read-only).
        return self._symmetry_measure_type

    def set_structure_environments(self, structure_environments):
        # Attach a StructureEnvironments object and precompute the symmetry
        # operations of its structure.
        self.structure_environments = structure_environments
        if not isinstance(self.structure_environments.voronoi, DetailedVoronoiContainer):
            raise ValueError('Voronoi Container not of type "DetailedVoronoiContainer"')
        self.prepare_symmetries()

    def prepare_symmetries(self):
        # Compute the structure's symmetry operations; on any failure fall
        # back to an empty list (an identity fallback is applied later in
        # equivalent_site_index_and_transform).
        # NOTE(review): the bare except also hides unexpected errors, not only
        # symmetry-analysis failures -- consider narrowing.
        try:
            self.spg_analyzer = SpacegroupAnalyzer(self.structure_environments.structure)
            self.symops = self.spg_analyzer.get_symmetry_operations()
        except:
            self.symops = []

    def equivalent_site_index_and_transform(self, psite):
        """Find the equivalent site of *psite* in the unit cell together with
        the transformation mapping the stored equivalent site onto it.

        :param psite: PeriodicSite (possibly a periodic replica of a unit-cell site)
        :return: [equivalent site index, dequivsite, dthissite + dthissite2, symmetry operation]
        :raise EquivalentSiteSearchError: if no symmetry operation maps the
            equivalent site onto psite within any of the tried tolerances
        """
        # Get the index of the site in the unit cell of which the PeriodicSite psite is a replica.
        # NOTE(review): if psite matches no site in the structure at all,
        # 'isite' stays unbound and the line below raises UnboundLocalError
        # instead of a clearer error -- confirm whether that can happen here.
        try:
            isite = self.structure_environments.structure.index(psite)
        except ValueError:
            try:
                uc_psite = psite.to_unit_cell
                isite = self.structure_environments.structure.index(uc_psite)
            except ValueError:
                for isite2, site2 in enumerate(self.structure_environments.structure):
                    if psite.is_periodic_image(site2):
                        isite = isite2
                        break
        # Get the translation between psite and its corresponding site in the unit cell (Translation I)
        thissite = self.structure_environments.structure[isite]
        dthissite = psite.frac_coords - thissite.frac_coords
        # Get the translation between the equivalent site for which the neighbors have been computed and the site in
        # the unit cell that corresponds to psite (Translation II)
        equivsite = self.structure_environments.structure[self.structure_environments.sites_map[isite]].to_unit_cell
        #equivsite = self.structure_environments.structure[self.structure_environments.sites_map[isite]]
        dequivsite = (self.structure_environments.structure[self.structure_environments.sites_map[isite]].frac_coords
                      - equivsite.frac_coords)
        found = False
        # Find the symmetry that applies the site in the unit cell to the equivalent site, as well as the translation
        # that gets back the site to the unit cell (Translation III)
        #TODO: check that these tolerances are needed, now that the structures are refined before analyzing environments
        tolerances = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4]
        for tolerance in tolerances:
            for symop in self.symops:
                newsite = PeriodicSite(equivsite._species, symop.operate(equivsite.frac_coords), equivsite._lattice)
                if newsite.is_periodic_image(thissite, tolerance=tolerance):
                    mysym = symop
                    dthissite2 = thissite.frac_coords - newsite.frac_coords
                    found = True
                    break
            if not found:
                # Fallback: try the identity operation (covers the case where
                # prepare_symmetries() left self.symops empty).
                symops = [SymmOp.from_rotation_and_translation()]
                for symop in symops:
                    newsite = PeriodicSite(equivsite._species, symop.operate(equivsite.frac_coords), equivsite._lattice)
                    #if newsite.is_periodic_image(thissite):
                    if newsite.is_periodic_image(thissite, tolerance=tolerance):
                        mysym = symop
                        dthissite2 = thissite.frac_coords - newsite.frac_coords
                        found = True
                        break
            if found:
                break
        if not found:
            raise EquivalentSiteSearchError(psite)
        return [self.structure_environments.sites_map[isite], dequivsite, dthissite + dthissite2, mysym]

    @abc.abstractmethod
    def get_site_neighbors(self, site):
        """
        Applies the strategy to the structure_environments object in order to get the neighbors of a given site.
        :param site: Site for which the neighbors are looked for
        :param structure_environments: StructureEnvironments object containing all the information needed to get the
        neighbors of the site
        :return: The list of neighbors of the site. For complex strategies, where one allows multiple solutions, this
        can return a list of list of neighbors
        """
        raise NotImplementedError()

    @property
    def uniquely_determines_coordination_environments(self):
        """
        Returns True if the strategy leads to a unique coordination environment, False otherwise.
        :return: True if the strategy leads to a unique coordination environment, False otherwise.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_site_coordination_environment(self, site):
        """
        Applies the strategy to the structure_environments object in order to define the coordination environment of
        a given site.
        :param site: Site for which the coordination environment is looked for
        :return: The coordination environment of the site. For complex strategies, where one allows multiple
        solutions, this can return a list of coordination environments for the site
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_site_coordination_environments(self, site):
        """
        Applies the strategy to the structure_environments object in order to define the coordination environment of
        a given site.
        :param site: Site for which the coordination environment is looked for
        :return: The coordination environment of the site. For complex strategies, where one allows multiple
        solutions, this can return a list of coordination environments for the site
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
                                                     mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
                                                     return_strategy_dict_info=False):
        """
        Applies the strategy to the structure_environments object in order to define the coordination environment of
        a given site.
        :param site: Site for which the coordination environment is looked for
        :return: The coordination environment of the site. For complex strategies, where one allows multiple
        solutions, this can return a list of coordination environments for the site
        """
        raise NotImplementedError()

    def get_site_ce_fractions_and_neighbors(self, site, full_ce_info=False, strategy_info=False):
        """
        Applies the strategy to the structure_environments object in order to get coordination environments, their
        fraction, csm, geometry_info, and neighbors
        :param site: Site for which the above information is sought
        :return: The list of neighbors of the site. For complex strategies, where one allows multiple solutions, this
        can return a list of list of neighbors
        """
        [isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
        geoms_and_maps_list = self.get_site_coordination_environments_fractions(site=site, isite=isite,
                                                                                dequivsite=dequivsite,
                                                                                dthissite=dthissite, mysym=mysym,
                                                                                return_maps=True,
                                                                                return_strategy_dict_info=True)
        if geoms_and_maps_list is None:
            return None
        site_nbs_sets = self.structure_environments.neighbors_sets[isite]
        ce_and_neighbors = []
        for fractions_dict in geoms_and_maps_list:
            # ce_map is a (coordination number, neighbors-set index) pair
            # identifying the neighbors set this fraction refers to.
            ce_map = fractions_dict['ce_map']
            ce_nb_set = site_nbs_sets[ce_map[0]][ce_map[1]]
            neighbors = [{'site': nb_site_and_index['site'],
                          'index': nb_site_and_index['index']}
                         for nb_site_and_index in ce_nb_set.neighb_sites_and_indices]
            fractions_dict['neighbors'] = neighbors
            ce_and_neighbors.append(fractions_dict)
        return ce_and_neighbors

    def set_option(self, option_name, option_value):
        # Options are stored as plain attributes (often properties defined by
        # the concrete strategy class, which perform validation on assignment).
        self.__setattr__(option_name, option_value)

    def setup_options(self, all_options_dict):
        # Apply a whole {option_name: value} mapping at once.
        for option_name, option_value in all_options_dict.items():
            self.set_option(option_name, option_value)

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Equality method that should be implemented for any strategy
        :param other: strategy to be compared with the current one
        :return:
        """
        raise NotImplementedError()

    def __str__(self):
        # Human-readable summary: class name, description, and current
        # values of every declared option.
        out = ' Chemenv Strategy "{}"\n'.format(self.__class__.__name__)
        out += ' {}\n\n'.format('='*(19+len(self.__class__.__name__)))
        out += ' Description :\n {}\n'.format('-'*13)
        out += self.STRATEGY_DESCRIPTION
        out += '\n\n'
        out += ' Options :\n {}\n'.format('-'*9)
        for option_name, option_dict in self.STRATEGY_OPTIONS.items():
            out += ' - {} : {}\n'.format(option_name, str(getattr(self, option_name)))
        return out

    @abc.abstractmethod
    def as_dict(self):
        """
        Bson-serializable dict representation of the SimplestChemenvStrategy object.
        :return: Bson-serializable dict representation of the SimplestChemenvStrategy object.
        """
        raise NotImplementedError()

    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the SimpleAbundanceChemenvStrategy object from a dict representation of the
        SimpleAbundanceChemenvStrategy object created using the as_dict method.
        :param d: dict representation of the SimpleAbundanceChemenvStrategy object
        :return: StructureEnvironments object
        """
        raise NotImplementedError()
class SimplestChemenvStrategy(AbstractChemenvStrategy):
"""
Simplest ChemenvStrategy using fixed angle and distance parameters for the definition of neighbors in the
Voronoi approach. The coordination environment is then given as the one with the lowest continuous symmetry measure
"""
# Default values for the distance and angle cutoffs
DEFAULT_DISTANCE_CUTOFF = 1.4
DEFAULT_ANGLE_CUTOFF = 0.3
DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF = 10.0
DEFAULT_ADDITIONAL_CONDITION = AbstractChemenvStrategy.AC.ONLY_ACB
STRATEGY_OPTIONS = OrderedDict()
STRATEGY_OPTIONS['distance_cutoff'] = {'type': DistanceCutoffFloat, 'internal': '_distance_cutoff',
'default': DEFAULT_DISTANCE_CUTOFF}
STRATEGY_OPTIONS['angle_cutoff'] = {'type': AngleCutoffFloat, 'internal': '_angle_cutoff',
'default': DEFAULT_ANGLE_CUTOFF}
STRATEGY_OPTIONS['additional_condition'] = {'type': AdditionalConditionInt,
'internal': '_additional_condition',
'default': DEFAULT_ADDITIONAL_CONDITION}
STRATEGY_OPTIONS['continuous_symmetry_measure_cutoff'] = {'type': CSMFloat,
'internal': '_continuous_symmetry_measure_cutoff',
'default': DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF}
STRATEGY_DESCRIPTION = ' Simplest ChemenvStrategy using fixed angle and distance parameters \n' \
' for the definition of neighbors in the Voronoi approach. \n' \
' The coordination environment is then given as the one with the \n' \
' lowest continuous symmetry measure.'
def __init__(self, structure_environments=None, distance_cutoff=DEFAULT_DISTANCE_CUTOFF,
angle_cutoff=DEFAULT_ANGLE_CUTOFF, additional_condition=DEFAULT_ADDITIONAL_CONDITION,
continuous_symmetry_measure_cutoff=DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE):
"""
Constructor for this SimplestChemenvStrategy.
:param distance_cutoff: Distance cutoff used
:param angle_cutoff: Angle cutoff used
"""
AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
self.distance_cutoff = distance_cutoff
self.angle_cutoff = angle_cutoff
self.additional_condition = additional_condition
self.continuous_symmetry_measure_cutoff = continuous_symmetry_measure_cutoff
@property
def uniquely_determines_coordination_environments(self):
return True
@property
def distance_cutoff(self):
return self._distance_cutoff
@distance_cutoff.setter
def distance_cutoff(self, distance_cutoff):
self._distance_cutoff = DistanceCutoffFloat(distance_cutoff)
@property
def angle_cutoff(self):
return self._angle_cutoff
@angle_cutoff.setter
def angle_cutoff(self, angle_cutoff):
self._angle_cutoff = AngleCutoffFloat(angle_cutoff)
@property
def additional_condition(self):
return self._additional_condition
@additional_condition.setter
def additional_condition(self, additional_condition):
self._additional_condition = AdditionalConditionInt(additional_condition)
@property
def continuous_symmetry_measure_cutoff(self):
return self._continuous_symmetry_measure_cutoff
@continuous_symmetry_measure_cutoff.setter
def continuous_symmetry_measure_cutoff(self, continuous_symmetry_measure_cutoff):
self._continuous_symmetry_measure_cutoff = CSMFloat(continuous_symmetry_measure_cutoff)
def get_site_neighbors(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None):#, neighbors_map=None):
#if neighbors_map is not None:
# return self.structure_environments.voronoi.get_neighbors(isite=isite, neighbors_map=neighbors_map)
if isite is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
ce, cn_map = self.get_site_coordination_environment(site=site, isite=isite,
dequivsite=dequivsite, dthissite=dthissite, mysym=mysym,
return_map=True)
nb_set = self.structure_environments.neighbors_sets[isite][cn_map[0]][cn_map[1]]
eqsite_ps = nb_set.neighb_sites
coordinated_neighbors = []
for ips, ps in enumerate(eqsite_ps):
coords = mysym.operate(ps.frac_coords + dequivsite) + dthissite
ps_site = PeriodicSite(ps._species, coords, ps._lattice)
coordinated_neighbors.append(ps_site)
return coordinated_neighbors
def get_site_coordination_environment(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_map=False):
if isite is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
neighbors_normalized_distances = self.structure_environments.voronoi.neighbors_normalized_distances[isite]
neighbors_normalized_angles = self.structure_environments.voronoi.neighbors_normalized_angles[isite]
idist = None
for iwd, wd in enumerate(neighbors_normalized_distances):
if self.distance_cutoff >= wd['min']:
idist = iwd
else:
break
iang = None
for iwa, wa in enumerate(neighbors_normalized_angles):
if self.angle_cutoff <= wa['max']:
iang = iwa
else:
break
if idist is None or iang is None:
raise ValueError('Distance or angle parameter not found ...')
my_cn = None
my_inb_set = None
found = False
for cn, nb_sets in self.structure_environments.neighbors_sets[isite].items():
for inb_set, nb_set in enumerate(nb_sets):
sources = [src for src in nb_set.sources
if src['origin'] == 'dist_ang_ac_voronoi' and src['ac'] == self.additional_condition]
for src in sources:
if src['idp'] == idist and src['iap'] == iang:
my_cn = cn
my_inb_set = inb_set
found = True
break
if found:
break
if found:
break
if not found:
return None
cn_map = (my_cn, my_inb_set)
ce = self.structure_environments.ce_list[self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]]
if ce is None:
return None
coord_geoms = ce.coord_geoms
if return_map:
if coord_geoms is None:
return cn_map[0], cn_map
return (ce.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type), cn_map)
else:
if coord_geoms is None:
return cn_map[0]
return ce.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
return_strategy_dict_info=False):
if isite is None or dequivsite is None or dthissite is None or mysym is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
site_nb_sets = self.structure_environments.neighbors_sets[isite]
if site_nb_sets is None:
return None
ce_and_map = self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym,
return_map=True)
if ce_and_map is None:
return None
ce, ce_map = ce_and_map
if ce is None:
ce_dict = {'ce_symbol': 'UNKNOWN:{:d}'.format(ce_map[0]), 'ce_dict': None, 'ce_fraction': 1.0}
else:
ce_dict = {'ce_symbol': ce[0], 'ce_dict': ce[1], 'ce_fraction': 1.0}
if return_maps:
ce_dict['ce_map'] = ce_map
if return_strategy_dict_info:
ce_dict['strategy_info'] = {}
fractions_info_list = [ce_dict]
return fractions_info_list
def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_maps=False):
return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym, return_map=return_maps)]
def add_strategy_visualization_to_subplot(self, subplot, visualization_options=None, plot_type=None):
subplot.plot(self._distance_cutoff, self._angle_cutoff, 'o', mec=None, mfc='w', markersize=12)
subplot.plot(self._distance_cutoff, self._angle_cutoff, 'x', linewidth=2, markersize=12)
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._distance_cutoff == other._distance_cutoff and self._angle_cutoff == other._angle_cutoff and
self._additional_condition == other._additional_condition and
self._continuous_symmetry_measure_cutoff == other._continuous_symmetry_measure_cutoff and
self.symmetry_measure_type == other.symmetry_measure_type)
def as_dict(self):
"""
Bson-serializable dict representation of the SimplestChemenvStrategy object.
:return: Bson-serializable dict representation of the SimplestChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"distance_cutoff": float(self._distance_cutoff),
"angle_cutoff": float(self._angle_cutoff),
"additional_condition": int(self._additional_condition),
"continuous_symmetry_measure_cutoff": float(self._continuous_symmetry_measure_cutoff),
"symmetry_measure_type": self._symmetry_measure_type}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object
created using the as_dict method.
:param d: dict representation of the SimplestChemenvStrategy object
:return: StructureEnvironments object
"""
return cls(distance_cutoff=d["distance_cutoff"], angle_cutoff=d["angle_cutoff"],
additional_condition=d["additional_condition"],
continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"],
symmetry_measure_type=d["symmetry_measure_type"])
class SimpleAbundanceChemenvStrategy(AbstractChemenvStrategy):
    """
    Simple ChemenvStrategy using the neighbors that are the most "abundant" in the grid of angle and distance
    parameters for the definition of neighbors in the Voronoi approach.
    The coordination environment is then given as the one with the lowest continuous symmetry measure

    NOTE(review): the constructor currently raises NotImplementedError, so this
    strategy is not usable yet; the statements after the raise are kept for when
    the implementation is completed.
    """
    DEFAULT_MAX_DIST = 2.0
    DEFAULT_ADDITIONAL_CONDITION = AbstractChemenvStrategy.AC.ONLY_ACB
    STRATEGY_OPTIONS = OrderedDict()
    STRATEGY_OPTIONS['additional_condition'] = {'type': AdditionalConditionInt,
                                                'internal': '_additional_condition',
                                                'default': DEFAULT_ADDITIONAL_CONDITION}
    STRATEGY_OPTIONS['surface_calculation_type'] = {}
    STRATEGY_DESCRIPTION = '    Simple Abundance ChemenvStrategy using the most "abundant" neighbors map \n' \
                           '    for the definition of neighbors in the Voronoi approach. \n' \
                           '    The coordination environment is then given as the one with the \n' \
                           '    lowest continuous symmetry measure.'
    def __init__(self, structure_environments=None,
                 additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
                 symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE):
        """
        Constructor for the SimpleAbundanceChemenvStrategy.
        :param structure_environments: StructureEnvironments object containing all the information on the
        coordination of the sites in a structure
        """
        raise NotImplementedError('SimpleAbundanceChemenvStrategy not yet implemented')
        # Unreachable until the NotImplementedError above is removed.
        AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
        self._additional_condition = additional_condition
    @property
    def uniquely_determines_coordination_environments(self):
        """Whether this strategy yields a single environment per site (it does)."""
        return True
    def get_site_neighbors(self, site):
        """
        Get the neighbors of a given site, transformed back from the equivalent
        site using the stored symmetry operation and translations.

        :param site: Site for which the neighbors are looked for.
        :return: List of PeriodicSite neighbors.
        """
        [isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
        cn_map = self._get_map(isite)
        eqsite_ps = (self.structure_environments.unique_coordinated_neighbors(isite, cn_map=cn_map))
        coordinated_neighbors = []
        for ips, ps in enumerate(eqsite_ps):
            # Map the equivalent site's neighbor back to this site's frame.
            coords = mysym.operate(ps.frac_coords + dequivsite) + dthissite
            # NOTE(review): accesses private PeriodicSite attributes (_species,
            # _lattice) — verify against the pymatgen version in use.
            ps_site = PeriodicSite(ps._species, coords, ps._lattice)
            coordinated_neighbors.append(ps_site)
        return coordinated_neighbors
    def get_site_coordination_environment(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
                                          return_map=False):
        """
        Get the coordination environment of a given site, using the most abundant
        (largest-surface) neighbors map of the site.

        :param site: Site for which the coordination environment is looked for.
        :param isite: Index of the site (recomputed from site when None).
        :param dequivsite: Translation of the equivalent site (unused here).
        :param dthissite: Translation of this site (unused here).
        :param mysym: Symmetry operation (unused here).
        :param return_map: Whether to also return the (cn, inb_set) map.
        :return: Minimum coordination geometry (or just the coordination number
            when no geometry was computed), None when no map exists; with the map
            appended when return_map is True.
        """
        if isite is None:
            [isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
        cn_map = self._get_map(isite)
        if cn_map is None:
            return None
        coord_geoms = (self.structure_environments.
                       ce_list[self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]])
        if return_map:
            if coord_geoms is None:
                return cn_map[0], cn_map
            return coord_geoms.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type), cn_map
        else:
            if coord_geoms is None:
                return cn_map[0]
            return coord_geoms.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
    def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
                                           return_maps=False):
        """Get the (single) coordination environment of the site as a one-element list."""
        return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
                                                       dthissite=dthissite, mysym=mysym, return_map=return_maps)]
    def _get_map(self, isite):
        """
        Get the (cn, inb_set) map with the largest surface in the distance/angle
        grid that satisfies the strategy's additional condition.
        """
        maps_and_surfaces = self._get_maps_surfaces(isite)
        if maps_and_surfaces is None:
            return None
        surface_max = 0.0
        imax = -1
        for ii, map_and_surface in enumerate(maps_and_surfaces):
            all_additional_conditions = [ac[2] for ac in map_and_surface['parameters_indices']]
            if self._additional_condition in all_additional_conditions and map_and_surface['surface'] > surface_max:
                surface_max = map_and_surface['surface']
                imax = ii
        # NOTE(review): if no map satisfies the additional condition, imax stays -1
        # and the LAST map is returned — confirm this fallback is intended rather
        # than returning None.
        return maps_and_surfaces[imax]['map']
    def _get_maps_surfaces(self, isite, surface_calculation_type=None):
        """
        Get the maps and their surfaces in the distance/angle grid for a site.

        :param isite: Index of the site.
        :param surface_calculation_type: Surface calculation options; defaults to
            initial-normalized distances and angles.
        """
        if surface_calculation_type is None:
            surface_calculation_type = {'distance_parameter': ('initial_normalized', None),
                                        'angle_parameter': ('initial_normalized', None)}
        return self.structure_environments.voronoi.maps_and_surfaces(isite=isite,
                                                                     surface_calculation_type=surface_calculation_type,
                                                                     max_dist=self.DEFAULT_MAX_DIST)
    def __eq__(self, other):
        # NOTE(review): compares self._additional_condition with
        # other.additional_condition (no leading underscore) — verify that the base
        # class exposes an 'additional_condition' property, otherwise this should
        # read other._additional_condition.
        return (self.__class__.__name__ == other.__class__.__name__ and
                self._additional_condition == other.additional_condition)
    def as_dict(self):
        """
        Bson-serializable dict representation of the SimpleAbundanceChemenvStrategy object.
        :return: Bson-serializable dict representation of the SimpleAbundanceChemenvStrategy object.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "additional_condition": self._additional_condition}
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the SimpleAbundanceChemenvStrategy object from a dict representation of the
        SimpleAbundanceChemenvStrategy object created using the as_dict method.
        :param d: dict representation of the SimpleAbundanceChemenvStrategy object
        :return: StructureEnvironments object
        """
        return cls(additional_condition=d["additional_condition"])
class TargettedPenaltiedAbundanceChemenvStrategy(SimpleAbundanceChemenvStrategy):
    """
    Simple ChemenvStrategy using the neighbors that are the most "abundant" in the grid of angle and distance
    parameters for the definition of neighbors in the Voronoi approach, with a bias for a given list of target
    environments. This can be useful in the case of, e.g. connectivity search of some given environment.
    The coordination environment is then given as the one with the lowest continuous symmetry measure

    NOTE(review): the constructor currently raises NotImplementedError, so this
    strategy is not usable yet.
    """
    DEFAULT_TARGET_ENVIRONMENTS = ['O:6']
    def __init__(self, structure_environments=None, truncate_dist_ang=True,
                 additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
                 max_nabundant=5, target_environments=DEFAULT_TARGET_ENVIRONMENTS, target_penalty_type='max_csm',
                 max_csm=5.0, symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE):
        """
        Constructor for the TargettedPenaltiedAbundanceChemenvStrategy.
        :param structure_environments: StructureEnvironments object containing all the information on the
            coordination of the sites in a structure.
        :param truncate_dist_ang: Whether to truncate the distance/angle grid
            (currently unused — see NotImplementedError below).
        :param additional_condition: Additional condition for the neighbors.
        :param max_nabundant: Maximum number of most-abundant maps to consider.
        :param target_environments: List of target mp_symbols (e.g. ['O:6']).
        :param target_penalty_type: Type of penalty applied to target environments.
        :param max_csm: Maximum continuous symmetry measure accepted for a target.
        """
        raise NotImplementedError('TargettedPenaltiedAbundanceChemenvStrategy not yet implemented')
        # Unreachable until the NotImplementedError above is removed.
        SimpleAbundanceChemenvStrategy.__init__(self, structure_environments,
                                                additional_condition=additional_condition,
                                                symmetry_measure_type=symmetry_measure_type)
        self.max_nabundant = max_nabundant
        self.target_environments = target_environments
        self.target_penalty_type = target_penalty_type
        self.max_csm = max_csm
    def get_site_coordination_environment(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
                                          return_map=False):
        """
        Get the coordination environment of a given site, biased towards the
        strategy's target environments (see _get_map).

        :param site: Site for which the coordination environment is looked for.
        :param isite: Index of the site (recomputed from site when None).
        :param dequivsite: Translation of the equivalent site (unused here).
        :param dthissite: Translation of this site (unused here).
        :param mysym: Symmetry operation (unused here).
        :param return_map: Whether to also return the (cn, inb_set) map.
        :return: Minimum coordination geometry (or the coordination number when no
            geometry was computed), None when no map exists; with the map appended
            when return_map is True.
        """
        if isite is None:
            [isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
        cn_map = self._get_map(isite)
        if cn_map is None:
            return None
        chemical_environments = (self.structure_environments.ce_list
                                 [self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]])
        if return_map:
            if chemical_environments.coord_geoms is None or len(chemical_environments) == 0:
                return cn_map[0], cn_map
            return chemical_environments.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type), cn_map
        else:
            if chemical_environments.coord_geoms is None:
                return cn_map[0]
            return chemical_environments.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
    def _get_map(self, isite):
        """
        Get the (cn, inb_set) map for a site, preferring (among the max_nabundant
        largest-surface maps) the one whose best geometry is a target environment
        with the lowest CSM below max_csm; falls back to the plain
        SimpleAbundanceChemenvStrategy map when no target matches.
        """
        maps_and_surfaces = SimpleAbundanceChemenvStrategy._get_maps_surfaces(self, isite)
        if maps_and_surfaces is None:
            return SimpleAbundanceChemenvStrategy._get_map(self, isite)
        current_map = None
        current_target_env_csm = 100.0
        surfaces = [map_and_surface['surface'] for map_and_surface in maps_and_surfaces]
        # Consider maps in order of decreasing surface (most abundant first).
        order = np.argsort(surfaces)[::-1]
        target_cgs = [AllCoordinationGeometries().get_geometry_from_mp_symbol(mp_symbol)
                      for mp_symbol in self.target_environments]
        target_cns = [cg.coordination_number for cg in target_cgs]
        for ii in range(min([len(maps_and_surfaces), self.max_nabundant])):
            my_map_and_surface = maps_and_surfaces[order[ii]]
            mymap = my_map_and_surface['map']
            cn = mymap[0]
            # Skip maps whose coordination number cannot be a target environment.
            if cn not in target_cns or cn > 12 or cn == 0:
                continue
            all_conditions = [params[2] for params in my_map_and_surface['parameters_indices']]
            if self._additional_condition not in all_conditions:
                continue
            cg, cgdict = (self.structure_environments.ce_list
                          [self.structure_environments.sites_map[isite]]
                          [mymap[0]][mymap[1]].minimum_geometry(symmetry_measure_type=self._symmetry_measure_type))
            # Keep the target environment with the lowest symmetry measure found so far.
            if (cg in self.target_environments and cgdict['symmetry_measure'] <= self.max_csm and
                    cgdict['symmetry_measure'] < current_target_env_csm):
                current_map = mymap
                current_target_env_csm = cgdict['symmetry_measure']
        if current_map is not None:
            return current_map
        else:
            return SimpleAbundanceChemenvStrategy._get_map(self, isite)
    @property
    def uniquely_determines_coordination_environments(self):
        """Whether this strategy yields a single environment per site (it does)."""
        return True
    def as_dict(self):
        """
        Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
        :return: Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.

        NOTE(review): truncate_dist_ang is not serialized here, so an
        as_dict/from_dict round-trip resets it to its default (True).
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "additional_condition": self._additional_condition,
                "max_nabundant": self.max_nabundant,
                "target_environments": self.target_environments,
                "target_penalty_type": self.target_penalty_type,
                "max_csm": self.max_csm}
    def __eq__(self, other):
        # NOTE(review): compares self._additional_condition with
        # other.additional_condition (no leading underscore) — verify the property
        # exists on the base class, otherwise use other._additional_condition.
        return (self.__class__.__name__ == other.__class__.__name__ and
                self._additional_condition == other.additional_condition and
                self.max_nabundant == other.max_nabundant and
                self.target_environments == other.target_environments and
                self.target_penalty_type == other.target_penalty_type and
                self.max_csm == other.max_csm)
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the TargettedPenaltiedAbundanceChemenvStrategy object from a dict representation of the
        TargettedPenaltiedAbundanceChemenvStrategy object created using the as_dict method.
        :param d: dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object
        :return: TargettedPenaltiedAbundanceChemenvStrategy object
        """
        return cls(additional_condition=d["additional_condition"],
                   max_nabundant=d["max_nabundant"],
                   target_environments=d["target_environments"],
                   target_penalty_type=d["target_penalty_type"],
                   max_csm=d["max_csm"])
class NbSetWeight(with_metaclass(abc.ABCMeta, MSONable)):
    """Abstract base class for the weight of a neighbors set, as used by weighted chemenv strategies."""
    @abc.abstractmethod
    def as_dict(self):
        """
        A JSON serializable dict representation of this neighbors set weight.
        """
        pass
    @abc.abstractmethod
    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """
        Get the weight of a given neighbors set.

        :param nb_set: Neighbors set to be weighted.
        :param structure_environments: StructureEnvironments the neighbors set belongs to.
        :param cn_map: Tuple (coordination number, index of the neighbors set).
        :param additional_info: Mutable dict shared between weights for caching
            intermediate quantities.
        """
        pass
class AngleNbSetWeight(NbSetWeight):
    """Weight of a neighbors set based on the sum of its solid angles, normalized to the full 4*pi."""
    SHORT_NAME = 'AngleWeight'

    def __init__(self, aa=1.0):
        """
        :param aa: Exponent applied to the normalized angle sum (1.0 means the
            plain normalized sum is used).
        """
        self.aa = aa
        # Dispatch once: plain sum for the default exponent, powered sum otherwise.
        self.aw = self.angle_sum if self.aa == 1.0 else self.angle_sumn

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the angle-based weight of the given neighbors set."""
        return self.aw(nb_set=nb_set)

    def angle_sum(self, nb_set):
        """Fraction of the full solid angle (4*pi) covered by the neighbors set."""
        return np.sum(nb_set.angles) / (4.0 * np.pi)

    def angle_sumn(self, nb_set):
        """Normalized angle sum raised to the power aa."""
        return np.power(self.angle_sum(nb_set=nb_set), self.aa)

    def __eq__(self, other):
        return self.aa == other.aa

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """JSON serializable dict representation of this weight."""
        dd = {"@module": self.__class__.__module__,
              "@class": self.__class__.__name__,
              "aa": self.aa}
        return dd

    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its dict representation."""
        return cls(aa=dd['aa'])
class NormalizedAngleDistanceNbSetWeight(NbSetWeight):
    """
    Weight of a neighbors set combining, for each neighbor, its normalized angle
    (to the power aa) and the inverse of its normalized distance (to the power bb),
    averaged geometrically or arithmetically over the set.
    """
    SHORT_NAME = 'NormAngleDistWeight'

    def __init__(self, average_type, aa, bb):
        """
        :param average_type: How per-neighbor values are combined, 'geometric' or 'arithmetic'.
        :param aa: Exponent of the normalized angle (0 disables the angle part).
        :param bb: Exponent of the inverse normalized distance (0 disables the distance part).
        :raise ValueError: If average_type is unknown or both exponents are 0.
        """
        self.average_type = average_type
        if self.average_type == 'geometric':
            self.eval = self.gweight
        elif self.average_type == 'arithmetic':
            self.eval = self.aweight
        else:
            raise ValueError('Average type is "{}" while it should be '
                             '"geometric" or "arithmetic"'.format(average_type))
        self.aa = aa
        self.bb = bb
        # Select the per-neighbor function angle^aa / distance^bb once, with
        # dedicated fast paths when an exponent is 0 or 1.
        if self.aa == 0 and self.bb == 0:
            raise ValueError('Both exponents are 0.')
        if self.aa == 0:
            self.fda = self.invdist if self.bb == 1 else self.invndist
        elif self.bb == 0:
            self.fda = self.ang if self.aa == 1 else self.angn
        elif self.aa == 1:
            self.fda = self.anginvdist if self.bb == 1 else self.anginvndist
        else:
            self.fda = self.angninvdist if self.bb == 1 else self.angninvndist

    def __eq__(self, other):
        return self.average_type == other.average_type and self.aa == other.aa and self.bb == other.bb

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """JSON serializable dict representation of this weight."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "average_type": self.average_type,
                "aa": self.aa,
                "bb": self.bb
                }

    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its dict representation."""
        return cls(average_type=dd['average_type'], aa=dd['aa'], bb=dd['bb'])

    def invdist(self, nb_set):
        """1 / distance for each neighbor (aa=0, bb=1)."""
        return [1.0 / d for d in nb_set.normalized_distances]

    def invndist(self, nb_set):
        """1 / distance^bb for each neighbor (aa=0)."""
        return [1.0 / d ** self.bb for d in nb_set.normalized_distances]

    def ang(self, nb_set):
        """Angle for each neighbor (aa=1, bb=0)."""
        return nb_set.normalized_angles

    def angn(self, nb_set):
        """Angle^aa for each neighbor (bb=0)."""
        return [a ** self.aa for a in nb_set.normalized_angles]

    def anginvdist(self, nb_set):
        """Angle / distance for each neighbor (aa=1, bb=1)."""
        return [a / d for a, d in zip(nb_set.normalized_angles, nb_set.normalized_distances)]

    def anginvndist(self, nb_set):
        """Angle / distance^bb for each neighbor (aa=1)."""
        return [a / d ** self.bb for a, d in zip(nb_set.normalized_angles, nb_set.normalized_distances)]

    def angninvdist(self, nb_set):
        """Angle^aa / distance for each neighbor (bb=1)."""
        return [a ** self.aa / d for a, d in zip(nb_set.normalized_angles, nb_set.normalized_distances)]

    def angninvndist(self, nb_set):
        """Angle^aa / distance^bb for each neighbor."""
        return [a ** self.aa / d ** self.bb for a, d in zip(nb_set.normalized_angles, nb_set.normalized_distances)]

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Average the per-neighbor angle/distance values over the neighbors set."""
        per_neighbor = self.fda(nb_set=nb_set)
        return self.eval(fda_list=per_neighbor)

    def gweight(self, fda_list):
        """Geometric mean of the per-neighbor values."""
        return gmean(fda_list)

    def aweight(self, fda_list):
        """Arithmetic mean of the per-neighbor values."""
        return np.mean(fda_list)
def get_effective_csm(nb_set, cn_map, structure_environments, additional_info,
                      symmetry_measure_type, max_effective_csm, effective_csm_estimator_ratio_function):
    """
    Get the effective continuous symmetry measure (CSM) of a given neighbors set.

    The effective CSM is a weighted mean (mean_estimator of the ratio function)
    of the CSMs of the minimum geometries below max_effective_csm. The result is
    cached in additional_info['effective_csms'][isite][cn_map] so it is only
    computed once per neighbors set.

    :param nb_set: Neighbors set whose effective CSM is wanted.
    :param cn_map: Tuple (coordination number, index of the neighbors set).
    :param structure_environments: StructureEnvironments the neighbors set belongs to.
    :param additional_info: Mutable dict used as a cache across calls.
    :param symmetry_measure_type: Type of symmetry measure to use.
    :param max_effective_csm: CSMs above this value are ignored.
    :param effective_csm_estimator_ratio_function: Ratio function whose
        mean_estimator combines the retained CSMs.
    :return: Effective CSM of the neighbors set (100.0 when no environment or no
        geometry below max_effective_csm exists).
    """
    try:
        # EAFP cache lookup: a KeyError means the value was not computed yet.
        effective_csm = additional_info['effective_csms'][nb_set.isite][cn_map]
    except KeyError:
        site_ce_list = structure_environments.ce_list[nb_set.isite]
        site_chemenv = site_ce_list[cn_map[0]][cn_map[1]]
        if site_chemenv is None:
            # No chemical environment computed for this neighbors set: use a
            # sentinel "very bad" CSM value.
            effective_csm = 100.0
        else:
            mingeoms = site_chemenv.minimum_geometries(symmetry_measure_type=symmetry_measure_type,
                                                       max_csm=max_effective_csm)
            if len(mingeoms) == 0:
                effective_csm = 100.0
            else:
                csms = [ce_dict['other_symmetry_measures'][symmetry_measure_type] for mp_symbol, ce_dict in mingeoms
                        if ce_dict['other_symmetry_measures'][symmetry_measure_type] <= max_effective_csm]
                effective_csm = effective_csm_estimator_ratio_function.mean_estimator(csms)
        # Store in the cache for subsequent calls.
        set_info(additional_info=additional_info, field='effective_csms',
                 isite=nb_set.isite, cn_map=cn_map, value=effective_csm)
    return effective_csm
def set_info(additional_info, field, isite, cn_map, value):
    """
    Store a value in the additional_info nested mapping, creating the
    intermediate dicts as needed.

    :param additional_info: Nested dict of the form {field: {isite: {cn_map: value}}}.
    :param field: Name of the quantity being stored (e.g. 'effective_csms').
    :param isite: Index of the site.
    :param cn_map: Tuple (coordination number, index of the neighbors set).
    :param value: Value to store.
    """
    # setdefault replaces the original nested try/except KeyError cascade: it
    # creates each missing level exactly once and cannot mask KeyErrors raised
    # for other reasons.
    additional_info.setdefault(field, {}).setdefault(isite, {})[cn_map] = value
class SelfCSMNbSetWeight(NbSetWeight):
    """Weight of a neighbors set based on its own effective continuous symmetry measure (CSM)."""
    SHORT_NAME = 'SelfCSMWeight'
    DEFAULT_EFFECTIVE_CSM_ESTIMATOR = {'function': 'power2_inverse_decreasing',
                                       'options': {'max_csm': 8.0}}
    DEFAULT_WEIGHT_ESTIMATOR = {'function': 'power2_decreasing_exp',
                                'options': {'max_csm': 8.0,
                                            'alpha': 1.0}}
    DEFAULT_SYMMETRY_MEASURE_TYPE = 'csm_wcs_ctwcc'
    def __init__(self, effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
                 weight_estimator=DEFAULT_WEIGHT_ESTIMATOR,
                 symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE):
        """
        :param effective_csm_estimator: Ratio-function description used to combine
            the CSMs of the minimum geometries into one effective CSM.
        :param weight_estimator: Ratio-function description mapping the effective
            CSM to a weight.
        :param symmetry_measure_type: Type of symmetry measure to use.
        """
        self.effective_csm_estimator = effective_csm_estimator
        self.effective_csm_estimator_rf = CSMInfiniteRatioFunction.from_dict(effective_csm_estimator)
        self.weight_estimator = weight_estimator
        self.weight_estimator_rf = CSMFiniteRatioFunction.from_dict(weight_estimator)
        self.symmetry_measure_type = symmetry_measure_type
        self.max_effective_csm = self.effective_csm_estimator['options']['max_csm']
    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """
        Get the weight of the neighbors set: the weight estimator evaluated at the
        set's effective CSM. The value is also cached in
        additional_info['self_csms_weights'][isite][cn_map].
        """
        effective_csm = get_effective_csm(nb_set=nb_set, cn_map=cn_map,
                                          structure_environments=structure_environments,
                                          additional_info=additional_info,
                                          symmetry_measure_type=self.symmetry_measure_type,
                                          max_effective_csm=self.max_effective_csm,
                                          effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf)
        weight = self.weight_estimator_rf.evaluate(effective_csm)
        set_info(additional_info=additional_info, field='self_csms_weights', isite=nb_set.isite,
                 cn_map=cn_map, value=weight)
        return weight
    def __eq__(self, other):
        return (self.effective_csm_estimator == other.effective_csm_estimator and
                self.weight_estimator == other.weight_estimator and
                self.symmetry_measure_type == other.symmetry_measure_type)
    def __ne__(self, other):
        return not self == other
    def as_dict(self):
        """JSON serializable dict representation of this weight."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "effective_csm_estimator": self.effective_csm_estimator,
                "weight_estimator": self.weight_estimator,
                "symmetry_measure_type": self.symmetry_measure_type
                }
    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its dict representation."""
        return cls(effective_csm_estimator=dd['effective_csm_estimator'],
                   weight_estimator=dd['weight_estimator'],
                   symmetry_measure_type=dd['symmetry_measure_type'])
class DeltaCSMNbSetWeight(NbSetWeight):
    """
    Weight of a neighbors set based on the differences of effective continuous
    symmetry measure (CSM) with the other neighbors sets of the same site: a
    neighbors set is downweighted when another set, with the same or a larger
    coordination number, has a significantly better (lower) effective CSM.
    """
    SHORT_NAME = 'DeltaCSMWeight'
    DEFAULT_EFFECTIVE_CSM_ESTIMATOR = {'function': 'power2_inverse_decreasing',
                                       'options': {'max_csm': 8.0}}
    DEFAULT_SYMMETRY_MEASURE_TYPE = 'csm_wcs_ctwcc'
    DEFAULT_WEIGHT_ESTIMATOR = {'function': 'smootherstep',
                                'options': {'delta_csm_min': 0.5,
                                            'delta_csm_max': 3.0}}
    def __init__(self, effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
                 weight_estimator=DEFAULT_WEIGHT_ESTIMATOR,
                 delta_cn_weight_estimators=None,
                 symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE):
        """
        :param effective_csm_estimator: Ratio-function description used to combine
            the CSMs of the minimum geometries into one effective CSM.
        :param weight_estimator: Ratio-function description mapping a delta CSM to
            a weight (used when no CN-difference-specific estimator applies).
        :param delta_cn_weight_estimators: Optional dict {delta_cn: estimator dict}
            of estimators specific to the difference of coordination numbers.
        :param symmetry_measure_type: Type of symmetry measure to use.
        """
        self.effective_csm_estimator = effective_csm_estimator
        self.effective_csm_estimator_rf = CSMInfiniteRatioFunction.from_dict(effective_csm_estimator)
        self.weight_estimator = weight_estimator
        # NOTE: when weight_estimator is None, self.weight_estimator_rf is never
        # set; weight() then requires every occurring delta_cn to be covered by
        # delta_cn_weight_estimators.
        if self.weight_estimator is not None:
            self.weight_estimator_rf = DeltaCSMRatioFunction.from_dict(weight_estimator)
        self.delta_cn_weight_estimators = delta_cn_weight_estimators
        self.delta_cn_weight_estimators_rfs = {}
        if delta_cn_weight_estimators is not None:
            for delta_cn, dcn_w_estimator in delta_cn_weight_estimators.items():
                self.delta_cn_weight_estimators_rfs[delta_cn] = DeltaCSMRatioFunction.from_dict(dcn_w_estimator)
        self.symmetry_measure_type = symmetry_measure_type
        self.max_effective_csm = self.effective_csm_estimator['options']['max_csm']
    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """
        Get the delta-CSM weight of a neighbors set.

        The weight is the minimum, over all other neighbors sets of the site with
        the same or a larger coordination number, of the weight estimated from the
        difference of effective CSMs. If another set with the SAME coordination
        number has a strictly lower effective CSM, the weight is 0.0. The delta
        CSM, the weight and the corresponding competing cn_map are cached in
        additional_info.

        :param nb_set: Neighbors set to be weighted.
        :param structure_environments: StructureEnvironments of the structure.
        :param cn_map: Tuple (coordination number, index of the neighbors set).
        :param additional_info: Mutable dict used as a cache across weights.
        :return: Weight of the neighbors set, in [0, 1].
        """
        effcsm = get_effective_csm(nb_set=nb_set, cn_map=cn_map,
                                   structure_environments=structure_environments,
                                   additional_info=additional_info,
                                   symmetry_measure_type=self.symmetry_measure_type,
                                   max_effective_csm=self.max_effective_csm,
                                   effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf)
        cn = cn_map[0]
        inb_set = cn_map[1]
        isite = nb_set.isite
        delta_csm = None
        delta_csm_cn_map2 = None
        nb_set_weight = 1.0
        for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
            # Only neighbors sets with the same or a larger CN can downweight this one.
            if cn2 < cn:
                continue
            for inb_set2, nb_set2 in enumerate(nb_sets):
                # Skip the neighbors set being weighted itself.
                # BUGFIX: was "inb_set == inb_set" (always True), which skipped ALL
                # neighbors sets with the same coordination number, not just this one.
                if cn == cn2 and inb_set == inb_set2:
                    continue
                effcsm2 = get_effective_csm(nb_set=nb_set2, cn_map=(cn2, inb_set2),
                                            structure_environments=structure_environments,
                                            additional_info=additional_info,
                                            symmetry_measure_type=self.symmetry_measure_type,
                                            max_effective_csm=self.max_effective_csm,
                                            effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf)
                this_delta_csm = effcsm2 - effcsm
                if cn2 == cn:
                    # Another set with the same CN is strictly better: zero weight.
                    if this_delta_csm < 0.0:
                        set_info(additional_info=additional_info, field='delta_csms', isite=isite,
                                 cn_map=cn_map, value=this_delta_csm)
                        set_info(additional_info=additional_info, field='delta_csms_weights', isite=isite,
                                 cn_map=cn_map, value=0.0)
                        set_info(additional_info=additional_info, field='delta_csms_cn_map2', isite=isite,
                                 cn_map=cn_map, value=(cn2, inb_set2))
                        return 0.0
                else:
                    dcn = cn2 - cn
                    # Use the CN-difference-specific estimator when available,
                    # otherwise fall back to the generic one.
                    if dcn in self.delta_cn_weight_estimators_rfs:
                        this_delta_csm_weight = self.delta_cn_weight_estimators_rfs[dcn].evaluate(this_delta_csm)
                    else:
                        this_delta_csm_weight = self.weight_estimator_rf.evaluate(this_delta_csm)
                    # Keep the most penalizing (lowest) weight found so far.
                    if this_delta_csm_weight < nb_set_weight:
                        delta_csm = this_delta_csm
                        delta_csm_cn_map2 = (cn2, inb_set2)
                        nb_set_weight = this_delta_csm_weight
        set_info(additional_info=additional_info, field='delta_csms', isite=isite,
                 cn_map=cn_map, value=delta_csm)
        set_info(additional_info=additional_info, field='delta_csms_weights', isite=isite,
                 cn_map=cn_map, value=nb_set_weight)
        set_info(additional_info=additional_info, field='delta_csms_cn_map2', isite=isite,
                 cn_map=cn_map, value=delta_csm_cn_map2)
        return nb_set_weight
    def __eq__(self, other):
        return (self.effective_csm_estimator == other.effective_csm_estimator and
                self.weight_estimator == other.weight_estimator and
                self.delta_cn_weight_estimators == other.delta_cn_weight_estimators and
                self.symmetry_measure_type == other.symmetry_measure_type)
    def __ne__(self, other):
        return not self == other
    @classmethod
    def delta_cn_specifics(cls, delta_csm_mins=None, delta_csm_maxs=None, function='smootherstep',
                           symmetry_measure_type='csm_wcs_ctwcc',
                           effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR):
        """
        Initialize with CN-difference-specific delta-CSM estimators for delta_cn
        from 1 to 12, either from explicit min/max lists or from a default linear
        progression; the generic estimator is set to the delta_cn=12 one.

        :param delta_csm_mins: List of delta_csm_min values, indexed by delta_cn-1
            (defaults used when None).
        :param delta_csm_maxs: List of delta_csm_max values, indexed by delta_cn-1
            (defaults used when None).
        :param function: Name of the ratio function (e.g. 'smootherstep').
        :param symmetry_measure_type: Type of symmetry measure to use.
        :param effective_csm_estimator: Ratio-function description for the
            effective CSM.
        :return: DeltaCSMNbSetWeight.
        """
        if delta_csm_mins is None or delta_csm_maxs is None:
            delta_cn_weight_estimators = {dcn: {'function': function,
                                                'options': {'delta_csm_min': 0.25+dcn*0.25,
                                                            'delta_csm_max': 5.0+dcn*0.25}} for dcn in range(1, 13)}
        else:
            delta_cn_weight_estimators = {dcn: {'function': function,
                                                'options': {'delta_csm_min': delta_csm_mins[dcn-1],
                                                            'delta_csm_max': delta_csm_maxs[dcn-1]}}
                                          for dcn in range(1, 13)}
        return cls(effective_csm_estimator=effective_csm_estimator,
                   weight_estimator={'function': function,
                                     'options': {'delta_csm_min': delta_cn_weight_estimators[12]
                                                 ['options']['delta_csm_min'],
                                                 'delta_csm_max': delta_cn_weight_estimators[12]
                                                 ['options']['delta_csm_max']}},
                   delta_cn_weight_estimators=delta_cn_weight_estimators,
                   symmetry_measure_type=symmetry_measure_type)
    def as_dict(self):
        """JSON serializable dict representation of this weight."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "effective_csm_estimator": self.effective_csm_estimator,
                "weight_estimator": self.weight_estimator,
                "delta_cn_weight_estimators": self.delta_cn_weight_estimators,
                "symmetry_measure_type": self.symmetry_measure_type
                }
    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its dict representation (delta_cn keys are re-cast to int)."""
        return cls(effective_csm_estimator=dd['effective_csm_estimator'],
                   weight_estimator=dd['weight_estimator'],
                   delta_cn_weight_estimators={int(dcn): dcn_estimator
                                               for dcn, dcn_estimator in dd['delta_cn_weight_estimators'].items()}
                   if ('delta_cn_weight_estimators' in dd and dd['delta_cn_weight_estimators'] is not None) else None,
                   symmetry_measure_type=dd['symmetry_measure_type'])
class CNBiasNbSetWeight(NbSetWeight):
    """Weight of a neighbors set that depends only on its coordination number (a per-CN bias)."""
    SHORT_NAME = 'CNBiasWeight'
    def __init__(self, cn_weights, initialization_options):
        """
        :param cn_weights: Dict mapping each coordination number to its weight.
        :param initialization_options: Dict describing how the weights were built
            (kept for serialization and equality checks).
        """
        self.cn_weights = cn_weights
        self.initialization_options = initialization_options
    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of the neighbors set: the bias for its coordination number."""
        return self.cn_weights[len(nb_set)]
    def __eq__(self, other):
        return (self.cn_weights == other.cn_weights and
                self.initialization_options == other.initialization_options)
    def __ne__(self, other):
        return not self == other
    def as_dict(self):
        """JSON serializable dict representation of this weight (CN keys stringified)."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "cn_weights": {str(cn): cnw for cn, cnw in self.cn_weights.items()},
                "initialization_options": self.initialization_options,
                }
    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its dict representation (CN keys re-cast to int)."""
        return cls(cn_weights={int(cn): cnw for cn, cnw in dd['cn_weights'].items()},
                   initialization_options=dd['initialization_options'])
    @classmethod
    def linearly_equidistant(cls, weight_cn1, weight_cn13):
        """
        Initialize weights for CN 1 to 13 linearly interpolated between the
        weights given for CN 1 and CN 13.
        """
        initialization_options = {'type': 'linearly_equidistant',
                                  'weight_cn1': weight_cn1,
                                  'weight_cn13': weight_cn13
                                  }
        dw = (weight_cn13 - weight_cn1) / 12.0
        cn_weights = {cn: weight_cn1 + (cn - 1) * dw for cn in range(1, 14)}
        return cls(cn_weights=cn_weights, initialization_options=initialization_options)
    @classmethod
    def geometrically_equidistant(cls, weight_cn1, weight_cn13):
        """
        Initialize weights for CN 1 to 13 geometrically interpolated between the
        weights given for CN 1 and CN 13.
        """
        initialization_options = {'type': 'geometrically_equidistant',
                                  'weight_cn1': weight_cn1,
                                  'weight_cn13': weight_cn13
                                  }
        factor = np.power(float(weight_cn13) / weight_cn1, 1.0 / 12.0)
        cn_weights = {cn: weight_cn1 * np.power(factor, cn - 1) for cn in range(1, 14)}
        return cls(cn_weights=cn_weights, initialization_options=initialization_options)
    @classmethod
    def explicit(cls, cn_weights):
        """
        Initialize from an explicit dict of weights for all CNs from 1 to 13.
        :raise ValueError: If any CN between 1 and 13 is missing or extra keys exist.
        """
        initialization_options = {'type': 'explicit'}
        if set(cn_weights.keys()) != set(range(1, 14)):
            raise ValueError('Weights should be provided for CN 1 to 13')
        return cls(cn_weights=cn_weights, initialization_options=initialization_options)
    @classmethod
    def from_description(cls, dd):
        """
        Initialize from a description dict with a 'type' key.

        :param dd: Dict with 'type' equal to 'linearly_equidistant',
            'geometrically_equidistant' or 'explicit', plus the matching options.
        :return: CNBiasNbSetWeight.
        :raise ValueError: If the type is unknown.
        """
        if dd['type'] == 'linearly_equidistant':
            return cls.linearly_equidistant(weight_cn1=dd['weight_cn1'], weight_cn13=dd['weight_cn13'])
        elif dd['type'] == 'geometrically_equidistant':
            return cls.geometrically_equidistant(weight_cn1=dd['weight_cn1'], weight_cn13=dd['weight_cn13'])
        elif dd['type'] == 'explicit':
            return cls.explicit(cn_weights=dd['cn_weights'])
        # ROBUSTNESS FIX: previously an unknown type silently returned None,
        # deferring the failure to an unrelated place; fail loudly instead.
        raise ValueError('Cannot initialize CNBiasNbSetWeight from type "{}"'.format(dd['type']))
class DistanceAngleAreaNbSetWeight(NbSetWeight):
SHORT_NAME = 'DistAngleAreaWeight'
AC = AdditionalConditions()
DEFAULT_SURFACE_DEFINITION = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.2, 'upper': 1.8},
'angle_bounds': {'lower': 0.1, 'upper': 0.8}}
def __init__(self, weight_type='has_intersection', surface_definition=DEFAULT_SURFACE_DEFINITION,
nb_sets_from_hints='fallback_to_source', other_nb_sets='0_weight',
additional_condition=AC.ONLY_ACB, smoothstep_distance=None, smoothstep_angle=None):
self.weight_type = weight_type
if weight_type == 'has_intersection':
self.area_weight = self.w_area_has_intersection
elif weight_type == 'has_intersection_smoothstep':
raise NotImplementedError()
# self.area_weight = self.w_area_has_intersection_smoothstep
else:
raise ValueError('Weight type is "{}" while it should be "has_intersection"'.format(weight_type))
self.surface_definition = surface_definition
self.nb_sets_from_hints = nb_sets_from_hints
self.other_nb_sets = other_nb_sets
self.additional_condition = additional_condition
self.smoothstep_distance = smoothstep_distance
self.smoothstep_angle = smoothstep_angle
if self.nb_sets_from_hints == 'fallback_to_source':
if self.other_nb_sets == '0_weight':
self.w_area_intersection_specific = self.w_area_intersection_nbsfh_fbs_onb0
else:
raise ValueError('Other nb_sets should be "0_weight"')
else:
raise ValueError('Nb_sets from hints should fallback to source')
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_definition)
self.dmin = surface_definition['distance_bounds']['lower']
self.dmax = surface_definition['distance_bounds']['upper']
self.amin = surface_definition['angle_bounds']['lower']
self.amax = surface_definition['angle_bounds']['upper']
self.f_lower = lower_and_upper_functions['lower']
self.f_upper = lower_and_upper_functions['upper']
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
return self.area_weight(nb_set=nb_set, structure_environments=structure_environments,
cn_map=cn_map, additional_info=additional_info)
def w_area_has_intersection_smoothstep(self, nb_set, structure_environments,
cn_map, additional_info):
w_area = self.w_area_intersection_specific(nb_set=nb_set, structure_environments=structure_environments,
cn_map=cn_map, additional_info=additional_info)
if w_area > 0.0:
if self.smoothstep_distance is not None:
w_area = w_area
if self.smoothstep_angle is not None:
w_area = w_area
return w_area
def w_area_has_intersection(self, nb_set, structure_environments,
cn_map, additional_info):
return self.w_area_intersection_specific(nb_set=nb_set, structure_environments=structure_environments,
cn_map=cn_map, additional_info=additional_info)
def w_area_intersection_nbsfh_fbs_onb0(self, nb_set, structure_environments,
cn_map, additional_info):
dist_ang_sources = [src for src in nb_set.sources
if src['origin'] == 'dist_ang_ac_voronoi' and src['ac'] == self.additional_condition]
if len(dist_ang_sources) > 0:
for src in dist_ang_sources:
d1 = src['dp_dict']['min']
d2 = src['dp_dict']['next']
a1 = src['ap_dict']['next']
a2 = src['ap_dict']['max']
if self.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2):
return 1.0
return 0.0
else:
from_hints_sources = [src for src in nb_set.sources if src['origin'] == 'nb_set_hints']
if len(from_hints_sources) == 0:
return 0.0
elif len(from_hints_sources) != 1:
raise ValueError('Found multiple hints sources for nb_set')
else:
cn_map_src = from_hints_sources[0]['cn_map_source']
nb_set_src = structure_environments.neighbors_sets[nb_set.isite][cn_map_src[0]][cn_map_src[1]]
dist_ang_sources = [src for src in nb_set_src.sources
if src['origin'] == 'dist_ang_ac_voronoi' and
src['ac'] == self.additional_condition]
if len(dist_ang_sources) == 0:
return 0.0
for src in dist_ang_sources:
d1 = src['dp_dict']['min']
d2 = src['dp_dict']['next']
a1 = src['ap_dict']['next']
a2 = src['ap_dict']['max']
if self.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2):
return 1.0
return 0.0
    def rectangle_crosses_area(self, d1, d2, a1, a2):
        """Whether the rectangle [d1, d2] x [a1, a2] in the distance/angle
        plane intersects the weighting surface.

        The surface is bounded by self.dmin/self.dmax in distance and
        self.amin/self.amax in angle, with distance-dependent lower/upper
        angle boundaries self.f_lower and self.f_upper. The six cases below
        split on where [d1, d2] lies relative to [dmin, dmax]; d1 <= d2 is
        assumed throughout — TODO confirm with callers.

        :param d1: Lower distance bound of the rectangle.
        :param d2: Upper distance bound of the rectangle.
        :param a1: Lower angle bound of the rectangle.
        :param a2: Upper angle bound of the rectangle.
        :return: True if the rectangle intersects the surface, False otherwise.
        """
        # Case 1: rectangle entirely before dmin.
        if d1 <= self.dmin and d2 <= self.dmin:
            return False
        # Case 6: rectangle entirely after dmax.
        if d1 >= self.dmax and d2 >= self.dmax:
            return False
        # Case 2: d1 before dmin, d2 within [dmin, dmax]: compare against the
        # lower boundary evaluated at d2 and the maximal angle.
        if d1 <= self.dmin and d2 <= self.dmax:
            ld2 = self.f_lower(d2)
            if a2 <= ld2 or a1 >= self.amax:
                return False
            return True
        # Case 3: rectangle spans the whole [dmin, dmax] range: only the
        # global angle bounds matter.
        if d1 <= self.dmin and d2 >= self.dmax:
            if a2 <= self.amin or a1 >= self.amax:
                return False
            return True
        # Case 4: both ends within [dmin, dmax]: no intersection only if the
        # rectangle lies below the lower boundary at both ends, or above the
        # upper boundary at both ends.
        if self.dmin <= d1 <= self.dmax and self.dmin <= d2 <= self.dmax:
            ld1 = self.f_lower(d1)
            ld2 = self.f_lower(d2)
            if a2 <= ld1 and a2 <= ld2:
                return False
            ud1 = self.f_upper(d1)
            ud2 = self.f_upper(d2)
            if a1 >= ud1 and a1 >= ud2:
                return False
            return True
        # Case 5: d1 within [dmin, dmax], d2 after dmax.
        if self.dmin <= d1 <= self.dmax and d2 >= self.dmax:
            ud1 = self.f_upper(d1)
            if a1 >= ud1 or a2 <= self.amin:
                return False
            return True
        # The five checks above are exhaustive for d1 <= d2.
        raise ValueError('Should not reach this point!')
def __eq__(self, other):
return (self.weight_type == other.weight_type and
self.surface_definition == other.surface_definition and
self.nb_sets_from_hints == other.nb_sets_from_hints and
self.other_nb_sets == other.other_nb_sets and
self.additional_condition == other.additional_condition
)
    def __ne__(self, other):
        # Delegate to __eq__ (required explicitly on Python 2; harmless on Python 3).
        return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"weight_type": self.weight_type,
"surface_definition": self.surface_definition,
"nb_sets_from_hints": self.nb_sets_from_hints,
"other_nb_sets": self.other_nb_sets,
"additional_condition": self.additional_condition}
@classmethod
def from_dict(cls, dd):
return cls(weight_type=dd['weight_type'], surface_definition=dd['surface_definition'],
nb_sets_from_hints=dd['nb_sets_from_hints'], other_nb_sets=dd['other_nb_sets'],
additional_condition=dd['additional_condition'])
class DistancePlateauNbSetWeight(NbSetWeight):
    """Weight of a neighbors set based on the plateau of its distance parameter."""

    SHORT_NAME = 'DistancePlateauWeight'

    def __init__(self, distance_function=None, weight_function=None):
        """Initialize the weight.

        :param distance_function: Description of the distance used (defaults
            to the normalized distance).
        :param weight_function: Description of the ratio function mapping the
            distance plateau to a weight (defaults to an inverse smootherstep
            between 0.2 and 0.4).
        """
        self.distance_function = ({'type': 'normalized_distance'}
                                  if distance_function is None else distance_function)
        default_wf = {'function': 'inverse_smootherstep', 'options': {'lower': 0.2, 'upper': 0.4}}
        self.weight_function = default_wf if weight_function is None else weight_function
        self.weight_rf = RatioFunction.from_dict(self.weight_function)

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Evaluate the weight from the distance plateau of *nb_set*."""
        return self.weight_rf.eval(nb_set.distance_plateau())

    def __eq__(self, other):
        # NOTE(review): equality only checks the class, not the configured
        # functions — preserved as-is from the original behavior.
        return self.__class__ == other.__class__

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSON-style dict representation of this weight."""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "distance_function": self.distance_function,
            "weight_function": self.weight_function,
        }

    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its as_dict representation."""
        return cls(distance_function=dd['distance_function'],
                   weight_function=dd['weight_function'])
class AnglePlateauNbSetWeight(NbSetWeight):
    """Weight of a neighbors set based on the plateau of its angle parameter."""

    SHORT_NAME = 'AnglePlateauWeight'

    def __init__(self, angle_function=None, weight_function=None):
        """Initialize the weight.

        :param angle_function: Description of the angle used (defaults to the
            normalized angle).
        :param weight_function: Description of the ratio function mapping the
            angle plateau to a weight (defaults to an inverse smootherstep
            between 0.05 and 0.15).
        """
        self.angle_function = ({'type': 'normalized_angle'}
                               if angle_function is None else angle_function)
        default_wf = {'function': 'inverse_smootherstep', 'options': {'lower': 0.05, 'upper': 0.15}}
        self.weight_function = default_wf if weight_function is None else weight_function
        self.weight_rf = RatioFunction.from_dict(self.weight_function)

    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Evaluate the weight from the angle plateau of *nb_set*."""
        return self.weight_rf.eval(nb_set.angle_plateau())

    def __eq__(self, other):
        # NOTE(review): equality only checks the class, not the configured
        # functions — preserved as-is from the original behavior.
        return self.__class__ == other.__class__

    def __ne__(self, other):
        return not self == other

    def as_dict(self):
        """MSON-style dict representation of this weight."""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "angle_function": self.angle_function,
            "weight_function": self.weight_function,
        }

    @classmethod
    def from_dict(cls, dd):
        """Reconstruct the weight from its as_dict representation."""
        return cls(angle_function=dd['angle_function'],
                   weight_function=dd['weight_function'])
class MultiUnlimitedWeightsChemenvStrategy(AbstractChemenvStrategy):
"""
MultiUnlimitedWeightsChemenvStrategy
"""
STRATEGY_DESCRIPTION = ' Multi Unlimited Weights ChemenvStrategy'
DEFAULT_CE_ESTIMATOR = {'function': 'power2_inverse_power2_decreasing',
'options': {'max_csm': 8.0}}
def __init__(self, structure_environments=None,
additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
nb_set_weights=None,
ce_estimator=DEFAULT_CE_ESTIMATOR):
"""
Constructor for the MultiUnlimitedWeightsChemenvStrategy.
:param structure_environments: StructureEnvironments object containing all the information on the
coordination of the sites in a structure
"""
AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
self._additional_condition = additional_condition
if nb_set_weights is None:
raise ValueError()
self.nb_set_weights = nb_set_weights
self.ordered_weights = []
for nb_set_weight in self.nb_set_weights:
self.ordered_weights.append({'weight': nb_set_weight, 'name': nb_set_weight.SHORT_NAME})
self.ce_estimator = ce_estimator
self.ce_estimator_ratio_function = CSMInfiniteRatioFunction.from_dict(self.ce_estimator)
self.ce_estimator_fractions = self.ce_estimator_ratio_function.fractions
    @property
    def uniquely_determines_coordination_environments(self):
        # This strategy yields fractions over several environments rather
        # than a single determined one.
        return False
def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
return_strategy_dict_info=False, return_all=False):
if isite is None or dequivsite is None or dthissite is None or mysym is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
site_nb_sets = self.structure_environments.neighbors_sets[isite]
if site_nb_sets is None:
return None
cn_maps = []
for cn, nb_sets in site_nb_sets.items():
for inb_set, nb_set in enumerate(nb_sets):
#CHECK THE ADDITIONAL CONDITION HERE ?
cn_maps.append((cn, inb_set))
weights_additional_info = {'weights': {isite: {}}}
for wdict in self.ordered_weights:
cn_maps_new = []
weight = wdict['weight']
weight_name = wdict['name']
for cn_map in cn_maps:
nb_set = site_nb_sets[cn_map[0]][cn_map[1]]
w_nb_set = weight.weight(nb_set=nb_set, structure_environments=self.structure_environments,
cn_map=cn_map, additional_info=weights_additional_info)
if cn_map not in weights_additional_info['weights'][isite]:
weights_additional_info['weights'][isite][cn_map] = {}
weights_additional_info['weights'][isite][cn_map][weight_name] = w_nb_set
if w_nb_set > 0.0:
cn_maps_new.append(cn_map)
cn_maps = cn_maps_new
for cn_map, weights in weights_additional_info['weights'][isite].items():
weights_additional_info['weights'][isite][cn_map]['Product'] = np.product(weights.values())
w_nb_sets = {cn_map: weights['Product']
for cn_map, weights in weights_additional_info['weights'][isite].items()}
w_nb_sets_total = np.sum(w_nb_sets.values())
nb_sets_fractions = {cn_map: w_nb_set / w_nb_sets_total for cn_map, w_nb_set in w_nb_sets.items()}
for cn_map in weights_additional_info['weights'][isite]:
weights_additional_info['weights'][isite][cn_map]['NbSetFraction'] = nb_sets_fractions[cn_map]
ce_symbols = []
ce_dicts = []
ce_fractions = []
ce_dict_fractions = []
ce_maps = []
site_ce_list = self.structure_environments.ce_list[isite]
if return_all:
for cn_map, nb_set_fraction in nb_sets_fractions.items():
cn = cn_map[0]
inb_set = cn_map[1]
site_ce_nb_set = site_ce_list[cn][inb_set]
if site_ce_nb_set is None:
continue
mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self.symmetry_measure_type)
if len(mingeoms) > 0:
csms = [ce_dict['other_symmetry_measures'][self.symmetry_measure_type]
for ce_symbol, ce_dict in mingeoms]
fractions = self.ce_estimator_fractions(csms)
if fractions is None:
ce_symbols.append('UNCLEAR:{:d}'.format(cn))
ce_dicts.append(None)
ce_fractions.append(nb_set_fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = None
dict_fractions['Fraction'] = nb_set_fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
for ifraction, fraction in enumerate(fractions):
ce_symbols.append(mingeoms[ifraction][0])
ce_dicts.append(mingeoms[ifraction][1])
ce_fractions.append(nb_set_fraction * fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = fraction
dict_fractions['Fraction'] = nb_set_fraction * fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
ce_symbols.append('UNCLEAR:{:d}'.format(cn))
ce_dicts.append(None)
ce_fractions.append(nb_set_fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = None
dict_fractions['Fraction'] = nb_set_fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
for cn_map, nb_set_fraction in nb_sets_fractions.items():
if nb_set_fraction > 0.0:
cn = cn_map[0]
inb_set = cn_map[1]
site_ce_nb_set = site_ce_list[cn][inb_set]
mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self._symmetry_measure_type)
csms = [ce_dict['other_symmetry_measures'][self._symmetry_measure_type]
for ce_symbol, ce_dict in mingeoms]
fractions = self.ce_estimator_fractions(csms)
for ifraction, fraction in enumerate(fractions):
if fraction > 0.0:
ce_symbols.append(mingeoms[ifraction][0])
ce_dicts.append(mingeoms[ifraction][1])
ce_fractions.append(nb_set_fraction * fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = fraction
dict_fractions['Fraction'] = nb_set_fraction * fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
if ordered:
indices = np.argsort(ce_fractions)[::-1]
else:
indices = list(range(len(ce_fractions)))
fractions_info_list = [
{'ce_symbol': ce_symbols[ii], 'ce_dict': ce_dicts[ii], 'ce_fraction': ce_fractions[ii]}
for ii in indices if ce_fractions[ii] >= min_fraction]
if return_maps:
for ifinfo, ii in enumerate(indices):
if ce_fractions[ii] >= min_fraction:
fractions_info_list[ifinfo]['ce_map'] = ce_maps[ii]
if return_strategy_dict_info:
for ifinfo, ii in enumerate(indices):
if ce_fractions[ii] >= min_fraction:
fractions_info_list[ifinfo]['strategy_info'] = ce_dict_fractions[ii]
return fractions_info_list
    def get_site_coordination_environment(self, site):
        # Not meaningful for this strategy: it does not uniquely determine a
        # single environment (use get_site_coordination_environments_fractions).
        pass
    def get_site_neighbors(self, site):
        # Not implemented for this strategy.
        pass
    def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
                                           return_maps=False):
        # Return the (single-element) list of coordination environments of this site.
        if isite is None or dequivsite is None or dthissite is None or mysym is None:
            [isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
        # NOTE(review): get_site_coordination_environment above takes only
        # `site` (and its body is `pass`), so this call with extra keyword
        # arguments would raise TypeError if executed — confirm the intended
        # implementation.
        return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
                                                       dthissite=dthissite, mysym=mysym, return_map=return_maps)]
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._additional_condition == other._additional_condition and
self.symmetry_measure_type == other.symmetry_measure_type and
self.nb_set_weights == other.nb_set_weights and
self.ce_estimator == other.ce_estimator)
    def __ne__(self, other):
        # Delegate to __eq__ (required explicitly on Python 2; harmless on Python 3).
        return not self == other
    def as_dict(self):
        """
        Bson-serializable dict representation of the MultiUnlimitedWeightsChemenvStrategy object.
        :return: Bson-serializable dict representation of the MultiUnlimitedWeightsChemenvStrategy object.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "additional_condition": self._additional_condition,
                "symmetry_measure_type": self.symmetry_measure_type,
                # Each weight is serialized with its own as_dict representation.
                "nb_set_weights": [nb_set_weight.as_dict() for nb_set_weight in self.nb_set_weights],
                "ce_estimator": self.ce_estimator,
                }
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the MultiUnlimitedWeightsChemenvStrategy object from a dict
        representation created using the as_dict method.
        :param d: dict representation of the MultiUnlimitedWeightsChemenvStrategy object
        :return: MultiUnlimitedWeightsChemenvStrategy object
        """
        # NOTE(review): as_dict serializes each weight with weight.as_dict(),
        # but the dicts are passed back here unchanged, so the reconstructed
        # strategy would hold plain dicts where weight objects are expected
        # (__init__ accesses nb_set_weight.SHORT_NAME) — the round-trip looks
        # broken; confirm and add proper deserialization of nb_set_weights.
        return cls(additional_condition=d["additional_condition"],
                   symmetry_measure_type=d["symmetry_measure_type"],
                   nb_set_weights=d["nb_set_weights"],
                   ce_estimator=d["ce_estimator"])
class MultiWeightsChemenvStrategy(AbstractChemenvStrategy):
"""
MultiWeightsChemenvStrategy
"""
STRATEGY_DESCRIPTION = ' Multi Weights ChemenvStrategy'
# STRATEGY_INFO_FIELDS = ['cn_map_surface_fraction', 'cn_map_surface_weight',
# 'cn_map_mean_csm', 'cn_map_csm_weight',
# 'cn_map_delta_csm', 'cn_map_delta_csms_cn_map2', 'cn_map_delta_csm_weight',
# 'cn_map_cn_weight',
# 'cn_map_fraction', 'cn_map_ce_fraction', 'ce_fraction']
DEFAULT_CE_ESTIMATOR = {'function': 'power2_inverse_power2_decreasing',
'options': {'max_csm': 8.0}}
DEFAULT_DIST_ANG_AREA_WEIGHT = {}
    def __init__(self, structure_environments=None,
                 additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
                 symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
                 dist_ang_area_weight=None,
                 self_csm_weight=None,
                 delta_csm_weight=None,
                 cn_bias_weight=None,
                 angle_weight=None,
                 normalized_angle_distance_weight=None,
                 ce_estimator=DEFAULT_CE_ESTIMATOR
                 ):
        """
        Constructor for the MultiWeightsChemenvStrategy.
        :param structure_environments: StructureEnvironments object containing all the information on the
        coordination of the sites in a structure
        :param additional_condition: Additional condition used to filter the neighbors sets.
        :param symmetry_measure_type: Type of the continuous symmetry measure to be used.
        :param dist_ang_area_weight: Weight based on the distance/angle surface area (or None to disable).
        :param self_csm_weight: Weight based on the neighbors set's own CSM (or None).
        :param delta_csm_weight: Weight based on CSM differences between neighbors sets (or None).
        :param cn_bias_weight: Weight biasing specific coordination numbers (or None).
        :param angle_weight: Weight based on the angle parameter (or None).
        :param normalized_angle_distance_weight: Weight based on normalized angle/distance (or None).
        :param ce_estimator: Estimator of the fractions of the coordination environments.
        """
        AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
        self._additional_condition = additional_condition
        self.dist_ang_area_weight = dist_ang_area_weight
        self.angle_weight = angle_weight
        self.normalized_angle_distance_weight = normalized_angle_distance_weight
        self.self_csm_weight = self_csm_weight
        self.delta_csm_weight = delta_csm_weight
        self.cn_bias_weight = cn_bias_weight
        # The enabled weights are applied in this fixed order; each weight can
        # filter out neighbors sets before the next one is evaluated.
        self.ordered_weights = []
        if dist_ang_area_weight is not None:
            self.ordered_weights.append({'weight': dist_ang_area_weight, 'name': 'DistAngArea'})
        if self_csm_weight is not None:
            self.ordered_weights.append({'weight': self_csm_weight, 'name': 'SelfCSM'})
        if delta_csm_weight is not None:
            self.ordered_weights.append({'weight': delta_csm_weight, 'name': 'DeltaCSM'})
        if cn_bias_weight is not None:
            self.ordered_weights.append({'weight': cn_bias_weight, 'name': 'CNBias'})
        if angle_weight is not None:
            self.ordered_weights.append({'weight': angle_weight, 'name': 'Angle'})
        if normalized_angle_distance_weight is not None:
            self.ordered_weights.append({'weight': normalized_angle_distance_weight, 'name': 'NormalizedAngDist'})
        self.ce_estimator = ce_estimator
        self.ce_estimator_ratio_function = CSMInfiniteRatioFunction.from_dict(self.ce_estimator)
        self.ce_estimator_fractions = self.ce_estimator_ratio_function.fractions
    @classmethod
    def stats_article_weights_parameters(cls):
        """Build the strategy with the weight parameters used in the statistics
        article (presumably the chemenv statistics publication — confirm)."""
        self_csm_weight = SelfCSMNbSetWeight(weight_estimator={'function': 'power2_decreasing_exp',
                                                               'options': {'max_csm': 8.0,
                                                                           'alpha': 1.0}})
        # Elliptic surface in the (normalized distance, normalized angle) plane.
        surface_definition = {'type': 'standard_elliptic',
                              'distance_bounds': {'lower': 1.15, 'upper': 2.0},
                              'angle_bounds': {'lower': 0.05, 'upper': 0.75}}
        da_area_weight = DistanceAngleAreaNbSetWeight(weight_type='has_intersection',
                                                      surface_definition=surface_definition,
                                                      nb_sets_from_hints='fallback_to_source',
                                                      other_nb_sets='0_weight',
                                                      additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB)
        symmetry_measure_type = 'csm_wcs_ctwcc'
        delta_weight = DeltaCSMNbSetWeight.delta_cn_specifics()
        # Bias, angle and normalized-angle-distance weights are disabled in
        # this parameterization.
        bias_weight = None
        angle_weight = None
        nad_weight = None
        return cls(dist_ang_area_weight=da_area_weight,
                   self_csm_weight=self_csm_weight,
                   delta_csm_weight=delta_weight,
                   cn_bias_weight=bias_weight,
                   angle_weight=angle_weight,
                   normalized_angle_distance_weight=nad_weight,
                   symmetry_measure_type=symmetry_measure_type)
    @property
    def uniquely_determines_coordination_environments(self):
        # This strategy yields fractions over several environments rather
        # than a single determined one.
        return False
def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
return_strategy_dict_info=False, return_all=False):
if isite is None or dequivsite is None or dthissite is None or mysym is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
site_nb_sets = self.structure_environments.neighbors_sets[isite]
if site_nb_sets is None:
return None
cn_maps = []
for cn, nb_sets in site_nb_sets.items():
for inb_set, nb_set in enumerate(nb_sets):
#CHECK THE ADDITIONAL CONDITION HERE ?
cn_maps.append((cn, inb_set))
weights_additional_info = {'weights': {isite: {}}}
for wdict in self.ordered_weights:
cn_maps_new = []
weight = wdict['weight']
weight_name = wdict['name']
for cn_map in cn_maps:
nb_set = site_nb_sets[cn_map[0]][cn_map[1]]
w_nb_set = weight.weight(nb_set=nb_set, structure_environments=self.structure_environments,
cn_map=cn_map, additional_info=weights_additional_info)
if cn_map not in weights_additional_info['weights'][isite]:
weights_additional_info['weights'][isite][cn_map] = {}
weights_additional_info['weights'][isite][cn_map][weight_name] = w_nb_set
if w_nb_set > 0.0:
cn_maps_new.append(cn_map)
cn_maps = cn_maps_new
for cn_map, weights in weights_additional_info['weights'][isite].items():
weights_additional_info['weights'][isite][cn_map]['Product'] = np.product(list(weights.values()))
w_nb_sets = {cn_map: weights['Product']
for cn_map, weights in weights_additional_info['weights'][isite].items()}
w_nb_sets_total = np.sum(list(w_nb_sets.values()))
nb_sets_fractions = {cn_map: w_nb_set / w_nb_sets_total for cn_map, w_nb_set in w_nb_sets.items()}
for cn_map in weights_additional_info['weights'][isite]:
weights_additional_info['weights'][isite][cn_map]['NbSetFraction'] = nb_sets_fractions[cn_map]
ce_symbols = []
ce_dicts = []
ce_fractions = []
ce_dict_fractions = []
ce_maps = []
site_ce_list = self.structure_environments.ce_list[isite]
if return_all:
for cn_map, nb_set_fraction in nb_sets_fractions.items():
cn = cn_map[0]
inb_set = cn_map[1]
site_ce_nb_set = site_ce_list[cn][inb_set]
if site_ce_nb_set is None:
continue
mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self.symmetry_measure_type)
if len(mingeoms) > 0:
csms = [ce_dict['other_symmetry_measures'][self.symmetry_measure_type]
for ce_symbol, ce_dict in mingeoms]
fractions = self.ce_estimator_fractions(csms)
if fractions is None:
ce_symbols.append('UNCLEAR:{:d}'.format(cn))
ce_dicts.append(None)
ce_fractions.append(nb_set_fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = None
dict_fractions['Fraction'] = nb_set_fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
for ifraction, fraction in enumerate(fractions):
ce_symbols.append(mingeoms[ifraction][0])
ce_dicts.append(mingeoms[ifraction][1])
ce_fractions.append(nb_set_fraction * fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = fraction
dict_fractions['Fraction'] = nb_set_fraction * fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
ce_symbols.append('UNCLEAR:{:d}'.format(cn))
ce_dicts.append(None)
ce_fractions.append(nb_set_fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = None
dict_fractions['Fraction'] = nb_set_fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
for cn_map, nb_set_fraction in nb_sets_fractions.items():
if nb_set_fraction > 0.0:
cn = cn_map[0]
inb_set = cn_map[1]
site_ce_nb_set = site_ce_list[cn][inb_set]
mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self._symmetry_measure_type)
csms = [ce_dict['other_symmetry_measures'][self._symmetry_measure_type]
for ce_symbol, ce_dict in mingeoms]
fractions = self.ce_estimator_fractions(csms)
for ifraction, fraction in enumerate(fractions):
if fraction > 0.0:
ce_symbols.append(mingeoms[ifraction][0])
ce_dicts.append(mingeoms[ifraction][1])
ce_fractions.append(nb_set_fraction * fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = fraction
dict_fractions['Fraction'] = nb_set_fraction * fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
if ordered:
indices = np.argsort(ce_fractions)[::-1]
else:
indices = list(range(len(ce_fractions)))
fractions_info_list = [
{'ce_symbol': ce_symbols[ii], 'ce_dict': ce_dicts[ii], 'ce_fraction': ce_fractions[ii]}
for ii in indices if ce_fractions[ii] >= min_fraction]
if return_maps:
for ifinfo, ii in enumerate(indices):
if ce_fractions[ii] >= min_fraction:
fractions_info_list[ifinfo]['ce_map'] = ce_maps[ii]
if return_strategy_dict_info:
for ifinfo, ii in enumerate(indices):
if ce_fractions[ii] >= min_fraction:
fractions_info_list[ifinfo]['strategy_info'] = ce_dict_fractions[ii]
return fractions_info_list
    def get_site_coordination_environment(self, site):
        # Not meaningful for this strategy: it does not uniquely determine a
        # single environment (use get_site_coordination_environments_fractions).
        pass
    def get_site_neighbors(self, site):
        # Not implemented for this strategy.
        pass
    def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
                                           return_maps=False):
        # Return the (single-element) list of coordination environments of this site.
        if isite is None or dequivsite is None or dthissite is None or mysym is None:
            [isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
        # NOTE(review): get_site_coordination_environment above takes only
        # `site` (and its body is `pass`), so this call with extra keyword
        # arguments would raise TypeError if executed — confirm the intended
        # implementation.
        return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
                                                       dthissite=dthissite, mysym=mysym, return_map=return_maps)]
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._additional_condition == other._additional_condition and
self.symmetry_measure_type == other.symmetry_measure_type and
self.dist_ang_area_weight == other.dist_ang_area_weight and
self.self_csm_weight == other.self_csm_weight and
self.delta_csm_weight == other.delta_csm_weight and
self.cn_bias_weight == other.cn_bias_weight and
self.angle_weight == other.angle_weight and
self.normalized_angle_distance_weight == other.normalized_angle_distance_weight and
self.ce_estimator == other.ce_estimator)
    def __ne__(self, other):
        # Delegate to __eq__ (required explicitly on Python 2; harmless on Python 3).
        return not self == other
    def as_dict(self):
        """
        Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
        :return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
        """
        # Each optional weight is serialized with its own as_dict, or stored
        # as None when the weight is disabled.
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "additional_condition": self._additional_condition,
                "symmetry_measure_type": self.symmetry_measure_type,
                "dist_ang_area_weight": self.dist_ang_area_weight.as_dict()
                if self.dist_ang_area_weight is not None else None,
                "self_csm_weight": self.self_csm_weight.as_dict()
                if self.self_csm_weight is not None else None,
                "delta_csm_weight": self.delta_csm_weight.as_dict()
                if self.delta_csm_weight is not None else None,
                "cn_bias_weight": self.cn_bias_weight.as_dict()
                if self.cn_bias_weight is not None else None,
                "angle_weight": self.angle_weight.as_dict()
                if self.angle_weight is not None else None,
                "normalized_angle_distance_weight": self.normalized_angle_distance_weight.as_dict()
                if self.normalized_angle_distance_weight is not None else None,
                "ce_estimator": self.ce_estimator,
                }
    @classmethod
    def from_dict(cls, d):
        """
        Reconstructs the MultiWeightsChemenvStrategy object from a dict representation of the
        MultiWeightsChemenvStrategy object created using the as_dict method.
        :param d: dict representation of the MultiWeightsChemenvStrategy object
        :return: MultiWeightsChemenvStrategy object
        """
        # Each weight is deserialized with its own from_dict when present,
        # mirroring the conditional serialization in as_dict.
        if d["normalized_angle_distance_weight"] is not None:
            nad_w = NormalizedAngleDistanceNbSetWeight.from_dict(d["normalized_angle_distance_weight"])
        else:
            nad_w = None
        return cls(additional_condition=d["additional_condition"],
                   symmetry_measure_type=d["symmetry_measure_type"],
                   dist_ang_area_weight=DistanceAngleAreaNbSetWeight.from_dict(d["dist_ang_area_weight"])
                   if d["dist_ang_area_weight"] is not None else None,
                   self_csm_weight=SelfCSMNbSetWeight.from_dict(d["self_csm_weight"])
                   if d["self_csm_weight"] is not None else None,
                   delta_csm_weight=DeltaCSMNbSetWeight.from_dict(d["delta_csm_weight"])
                   if d["delta_csm_weight"] is not None else None,
                   cn_bias_weight=CNBiasNbSetWeight.from_dict(d["cn_bias_weight"])
                   if d["cn_bias_weight"] is not None else None,
                   angle_weight=AngleNbSetWeight.from_dict(d["angle_weight"])
                   if d["angle_weight"] is not None else None,
                   normalized_angle_distance_weight=nad_w,
                   ce_estimator=d["ce_estimator"])
| mit |
pombredanne/pytest_django | setup.py | 2 | 1760 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the UTF-8 decoded content of *fname*, resolved relative to this file.

    Used for long_description so the README can live in one top-level file.

    :param fname: File name relative to this setup.py (an absolute path also works).
    :return: The decoded file content as text.
    """
    file_path = os.path.join(os.path.dirname(__file__), fname)
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with codecs.open(file_path, encoding='utf-8') as f:
        return f.read()
# Package metadata and build configuration for the pytest-django plugin.
setup(
    name='pytest-django',
    use_scm_version=True,  # version derived from git tags via setuptools_scm
    description='A Django plugin for py.test.',
    author='Andreas Pelme',
    author_email='andreas@pelme.se',
    maintainer="Andreas Pelme",
    maintainer_email="andreas@pelme.se",
    url='https://pytest-django.readthedocs.io/',
    license='BSD-3-Clause',
    packages=['pytest_django'],
    # Reuse the top-level README as the PyPI long description.
    long_description=read('README.rst'),
    setup_requires=['setuptools_scm==1.8.0'],
    install_requires=['pytest>=2.5'],
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Framework :: Django',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: BSD License',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python',
                 'Topic :: Software Development :: Testing',
                 'Programming Language :: Python :: 2.6',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3.2',
                 'Programming Language :: Python :: 3.3',
                 'Programming Language :: Python :: 3.4',
                 ],
    # the following makes a plugin available to py.test
    entry_points={'pytest11': ['django = pytest_django.plugin']})
| bsd-3-clause |
valtech-mooc/edx-platform | cms/djangoapps/contentstore/tests/test_i18n.py | 133 | 3282 | from unittest import skip
from django.contrib.auth.models import User
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from contentstore.tests.utils import AjaxEnabledTestClient
class InternationalizationTest(ModuleStoreTestCase):
"""
Tests to validate Internationalization.
"""
    def setUp(self):
        """
        These tests need a user in the DB so that the django Test Client
        can log them in.
        They inherit from the ModuleStoreTestCase class so that the mongodb collection
        will be cleared out before each test case execution and deleted
        afterwards.
        """
        # create_user=False: the user is created manually below with the
        # exact flags these tests need.
        super(InternationalizationTest, self).setUp(create_user=False)
        self.uname = 'testuser'
        self.email = 'test+courses@edx.org'
        self.password = 'foo'
        # Create the use so we can log them in.
        self.user = User.objects.create_user(self.uname, self.email, self.password)
        # Note that we do not actually need to do anything
        # for registration if we directly mark them active.
        self.user.is_active = True
        # Staff has access to view all courses
        self.user.is_staff = True
        self.user.save()
        # Minimal course identifiers shared by the tests.
        self.course_data = {
            'org': 'MITx',
            'number': '999',
            'display_name': 'Robot Super Course',
        }
    def test_course_plain_english(self):
        """The Studio Home heading is rendered in English when no language is requested."""
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.uname, password=self.password)
        resp = self.client.get_html('/home/')
        self.assertContains(resp,
                            '<h1 class="page-header">Studio Home</h1>',
                            status_code=200,
                            html=True)
    def test_course_explicit_english(self):
        """The Studio Home heading is rendered in English when 'en' is explicitly requested."""
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.uname, password=self.password)
        resp = self.client.get_html(
            '/home/',
            {},
            HTTP_ACCEPT_LANGUAGE='en',
        )
        self.assertContains(resp,
                            '<h1 class="page-header">Studio Home</h1>',
                            status_code=200,
                            html=True)
# ****
# NOTE:
# ****
#
# This test will break when we replace this fake 'test' language
# with actual Esperanto. This test will need to be updated with
# actual Esperanto at that time.
# Test temporarily disable since it depends on creation of dummy strings
    @skip
    def test_course_with_accents(self):
        """The heading is rendered with accented characters for the dummy 'eo' language."""
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.uname, password=self.password)
        resp = self.client.get_html(
            '/home/',
            {},
            HTTP_ACCEPT_LANGUAGE='eo'
        )
        # Expected heading with fake-accented characters produced by the
        # dummy-translation machinery for 'eo'.
        TEST_STRING = (
            u'<h1 class="title-1">'
            u'My \xc7\xf6\xfcrs\xe9s L#'
            u'</h1>'
        )
        self.assertContains(resp,
                            TEST_STRING,
                            status_code=200,
                            html=True)
| agpl-3.0 |
beni55/PyGithub | github/Consts.py | 74 | 2748 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
# #193: Line endings should be linux style
# TODO: As of Thu Aug 21 22:40:13 (BJT) Chinese Standard Time 2013
# lots of consts in this project are explicit
# should really round them up and reference them by consts
# EDIT: well, maybe :-)
# ##############################################################################
# Request Header #
# (Case sensitive) #
# ##############################################################################
# Conditional-request headers sent to the GitHub API (case sensitive, see
# the banner above).
REQ_IF_NONE_MATCH = "If-None-Match"
REQ_IF_MODIFIED_SINCE = "If-Modified-Since"
# ##############################################################################
# Response Header #
# (Lower Case) #
# ##############################################################################
# Response header names looked up in lower case (see the banner above).
RES_ETAG = "etag"
# Historical misspelling kept for backward compatibility with existing callers.
RES_LAST_MODIFED = "last-modified"
# Correctly spelled alias; prefer this name in new code.
RES_LAST_MODIFIED = RES_LAST_MODIFED
| gpl-3.0 |
IECS/MansOS | tools/seal/components/sadmote.py | 2 | 2013 | #
# Copyright (c) 2012 Atis Elsts
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Components for SADmote v03
#
from msp430 import *
class SQ100LightSensor(SealSensor):
    """SEAL sensor definition for the SQ100 light sensor (read via ADS1115)."""

    def __init__(self):
        super(SQ100LightSensor, self).__init__("SQ100Light")
        # The same C call serves both the "use" and the "read" roles.
        read_call = "sq100LightRead()"
        self.readFunction.value = read_call
        self.useFunction.value = read_call
        # Build-time configuration enabling the ADS1115 ADC and its
        # interrupt pin (string content must match the build system exactly).
        self.extraConfig = SealParameter("""
USE_ADS1115=y
CONST_ADS_INT_PORT=2
CONST_ADS_INT_PIN=0""")
        self.extraIncludes = SealParameter("#include <light.h>")
# Module-level singleton instance for this sensor.
sq100Light = SQ100LightSensor()
# default light sensor for this platform is ISL29003
# NOTE(review): 'light' comes from the wildcard msp430 import above; this
# mutates its build configuration in place (soft-I2C pin assignments).
light.extraConfig.value="""
# for apds & isl
USE_ISL29003=y
USE_SOFT_I2C=y
CONST_SDA_PORT=2
CONST_SDA_PIN=3
CONST_SCL_PORT=2
CONST_SCL_PIN=4"""
| mit |
mezz64/home-assistant | homeassistant/components/keyboard/__init__.py | 26 | 1660 | """Support to emulate keyboard presses on host machine."""
from pykeyboard import PyKeyboard # pylint: disable=import-error
import voluptuous as vol
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP,
)
DOMAIN = "keyboard"
TAP_KEY_SCHEMA = vol.Schema({})
def setup(hass, config):
    """Listen for keyboard events.

    Registers one Home Assistant service per supported volume/media action;
    each service call taps the corresponding special key on the host
    keyboard.  Always returns True (setup cannot fail here).
    """
    keyboard = PyKeyboard()
    keyboard.special_key_assignment()

    # Table-driven registration replaces six copy-pasted register() calls.
    service_to_key = {
        SERVICE_VOLUME_UP: keyboard.volume_up_key,
        SERVICE_VOLUME_DOWN: keyboard.volume_down_key,
        SERVICE_VOLUME_MUTE: keyboard.volume_mute_key,
        SERVICE_MEDIA_PLAY_PAUSE: keyboard.media_play_pause_key,
        SERVICE_MEDIA_NEXT_TRACK: keyboard.media_next_track_key,
        SERVICE_MEDIA_PREVIOUS_TRACK: keyboard.media_prev_track_key,
    }
    for service, key in service_to_key.items():
        # Bind `key` as a default argument: lambdas close over variables by
        # reference, so without this every handler would tap the last key.
        hass.services.register(
            DOMAIN,
            service,
            lambda service, key=key: keyboard.tap_key(key),
            schema=TAP_KEY_SCHEMA,
        )
    return True
| apache-2.0 |
hlamer/kate | addons/kate/pate/src/plugins/gdb/cli.py | 3 | 45180 | #
# Copyright 2009, 2013, Shaheed Haque <srhaque@theiet.org>.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License or (at your option) version 3 or any later version
# accepted by the membership of KDE e.V. (or its successor approved
# by the membership of KDE e.V.), which shall act as a proxy
# defined in Section 14 of version 3 of the license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import argparse
import atexit
import cmd
import re
import traceback
from IPython.frontend.terminal.console.interactiveshell import ZMQTerminalInteractiveShell
from IPython.lib.kernel import find_connection_file
from IPython.zmq.blockingkernelmanager import BlockingKernelManager
from PyQt4.QtCore import QCoreApplication, QObject
from gdb_command_db import GdbCommandDb
from qgdb import QGdbInterpreter
def dbg0(msg, *args):
    """Emit an error-level diagnostic; *msg* is str.format-ed with *args*."""
    print(" ".join(("ERR-0", msg.format(*args))))
def dbg1(msg, *args):
    """Emit a level-1 debug diagnostic; *msg* is str.format-ed with *args*."""
    print(" ".join(("DBG-1", msg.format(*args))))
def dbg2(msg, *args):
    """Emit a level-2 debug diagnostic; *msg* is str.format-ed with *args*."""
    print(" ".join(("DBG-2", msg.format(*args))))
class IPythonConsoleShell(ZMQTerminalInteractiveShell):
    """A simple console shell for IPython.
    References:
    - http://stackoverflow.com/questions/9977446/connecting-to-a-remote-ipython-instance
    - https://github.com/ipython/ipython/blob/master/IPython/zmq/blockingkernelmanager.py
    For the Qt version, see:
    - http://stackoverflow.com/questions/11513132/embedding-ipython-qt-console-in-a-pyqt-application
    """
    def __init__(self, *args, **kwargs):
        # Attach to an already-running kernel identified by its JSON
        # connection file; kwargs["connection_file"] is required and popped.
        connection_file = find_connection_file(kwargs.pop("connection_file"))
        km = BlockingKernelManager(connection_file=connection_file)
        km.load_connection_file()
        # hb=True starts the heartbeat channel as well.
        heartbeat = True
        km.start_channels(hb=heartbeat)
        # Remove the connection file when the interpreter exits.
        atexit.register(km.cleanup_connection_file)
        # NOTE(review): positional *args are accepted but silently dropped;
        # only the kernel manager reaches the base class.
        super(IPythonConsoleShell, self).__init__(kernel_manager = km)
        self.km = km
    def stop(self):
        # Flag the interactive loop to exit, then ask the remote kernel to
        # shut down and leave the shell's input loop.
        print("IPythonConsoleShell stop()")
        self.exit_now = True
        #self.km.stop_channels()
        self.km.shutdown_kernel()
        self.ask_exit()
class MyArgs(argparse.ArgumentParser):
    """ArgumentParser variant whose usage/help output uses a 4-column indent
    and no "usage:" prefix.

    NOTE: this pokes at argparse internals (the formatter's
    _indent_increment and the parser's _actions /
    _mutually_exclusive_groups / _action_groups), which are private API.
    The redundant pass-through __init__ of the original has been removed;
    construction behavior is unchanged.
    """

    def _indented_formatter(self):
        # Shared between format_usage() and format_help(): build a formatter
        # with our indent and add the usage line (empty prefix suppresses
        # argparse's default "usage: ").
        formatter = self._get_formatter()
        formatter._indent_increment = 4
        formatter.add_usage(self.usage, self._actions,
                            self._mutually_exclusive_groups, "")
        return formatter

    def format_usage(self):
        """Return only the (indented) usage line."""
        return self._indented_formatter().format_help()

    def format_help(self):
        """Return the full help text: usage, description, argument groups,
        epilog."""
        formatter = self._indented_formatter()
        # description
        formatter.add_text(self.description)
        # positionals, optionals and user-defined groups
        for action_group in self._action_groups:
            formatter.start_section(action_group.title)
            formatter.add_text(action_group.description)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()
        # epilog
        formatter.add_text(self.epilog)
        # determine help from format above
        return formatter.format_help()
class Cli(cmd.Cmd):
    """Python CLI for GDB."""
    prompt = "(pygdb) "
    #
    # Our database of commands.
    #
    commandDb = None
    #
    # Commands which will have environment variable substitution applied.
    #
    filesCommands = None
    #
    # Output handling.  Set in __init__ to the caller-supplied printLine
    # callable; all command output goes through it.
    #
    _out = None
def __init__(self, arguments, printLine):
    """Create the CLI.

    arguments -- argv-style arguments handed to the GDB wrapper.
    printLine -- callable used for all output.
    """
    cmd.Cmd.__init__(self)
    self._out = printLine
    # Start the underlying GDB session first: createCommandDb() below
    # queries it ("help all") to seed the command database.
    self.gdb = QGdbInterpreter(arguments, printLine)
    self.createCommandDb()
def createCommandDb(self):
    """Create a command database we can use to implement our CLI.

    Seeds the database from GDB's own "help all" output, then registers
    every do_XXX method on this object as an override.
    """
    #
    # Ask GDB for all the commands it has.
    #
    helpText = self.gdb.consoleCommand("help all", True)
    self.commandDb = GdbCommandDb(helpText)
    self.findFilesCommand()
    #
    # Add in all our overrides; that's any routine starting do_XXX.
    # The loop variable is deliberately not named "cmd": the original
    # name shadowed the stdlib cmd module (our base class' module)
    # inside this method.
    #
    for name in dir(self):
        if name.startswith("do_"):
            self.commandDb.addCustom(getattr(self, name))
    #dbg0(self.commandDb)
def findFilesCommand(self):
    """Make a list of each command which takes a file/path.

    Populates self.filesCommands, mapping "prefix keyword" -> apropos
    text, for every command classified "files" or whose keyword/apropos
    mentions " path" or " file".
    """
    def matchClass(clazz_exact, arg, indentation, prefix, keyword, apropos, clazz, function):
        """
        Add contents of the database which are in the given clazz_exact to
        the files set.
        """
        if clazz == clazz_exact:
            arg[prefix + keyword] = apropos
    def matchRegExp(regexp, arg, indentation, prefix, keyword, apropos, clazz, function):
        """
        Add contents of the database which match the given regexp to the
        files set.
        """
        if regexp.search(keyword) or regexp.search(apropos):
            arg[prefix + keyword] = apropos
    #
    # Put all the commands we want to wrap into a dictionary, to avoid duplicates.
    #
    self.filesCommands = dict()
    # Three passes over the same dict; later matches overwrite earlier
    # ones, so duplicates collapse naturally.
    self.commandDb.walk(matchClass, "files", self.filesCommands)
    self.commandDb.walk(matchRegExp, re.compile(" path", re.IGNORECASE), self.filesCommands)
    self.commandDb.walk(matchRegExp, re.compile(" file", re.IGNORECASE), self.filesCommands)
#
# See http://lists.baseurl.org/pipermail/yum-devel/2011-August/008495.html
#
def ____cmdloop(self):
    """ Sick hack for readline.

    NOTE(review): Python 2 only (__builtin__, raw_input), and it uses
    `sys`, which this module never imports -- calling this would raise
    NameError.  The 4-underscore prefix keeps it out of the way; retained
    for reference only.
    """
    import __builtin__
    oraw_input = raw_input
    owriter = sys.stdout
    _ostdout = owriter #.stream
    def _sick_hack_raw_input(prompt):
        # Temporarily restore the saved stdout while readline prompts.
        sys.stdout = _ostdout
        #rret = oraw_input(to_utf8(prompt))
        rret = oraw_input(prompt)
        sys.stdout = owriter
        return rret
    __builtin__.raw_input = _sick_hack_raw_input
    try:
        cret = cmd.Cmd.cmdloop(self)
    finally:
        # Always restore the real raw_input, even if the loop raises.
        __builtin__.raw_input = oraw_input
    return cret
def asyncWrapper(self, command, args):
    """Execute a command which causes the inferior to run.

    command -- the GDB console command name (e.g. "jump").
    args    -- its argument string, appended after a space.
    """
    # dbg0(msg, *args) applies msg.format(*args): the original calls passed
    # the values as extra positionals with no {} placeholders, so they were
    # silently dropped from the trace output.  Give format() placeholders.
    dbg0("asyncWrapper {} {}", command, args)
    command = "{} {}".format(command, args)
    dbg0("command {}", command)
    # Return the result instead of discarding it (previously assigned to an
    # unused local); existing callers ignore the return value, so this is
    # backward compatible.
    return self.gdb.consoleCommand(command)
##########################
##  Breakpoint commands ##
##########################
def do_break(self, args, getSynopsis = False):
    """
    breakpoints
    NAME
        break -- Set breakpoint at specified line or function
    DESCRIPTION
        LOCATION may be a probe point, line number, function name, or "*" and an address.
        If a line number is specified, break at start of code for that line.
        If a function is specified, break at start of code for that function.
        If an address is specified, break at that exact address.
        With no LOCATION, uses current execution address of the selected
        stack frame. This is useful for breaking on return to a stack frame.
        THREADNUM is the number from "info threads".
        CONDITION is a boolean expression.
        Multiple breakpoints at one place are permitted, and useful if their
        conditions are different.
        Do "help breakpoints" for info on other commands dealing with breakpoints.
    """
    # NOTE: the docstring above is runtime data -- do_help() parses do_*
    # docstrings by line position, so its layout must not be reflowed.
    parser = MyArgs(prog = "break", add_help = False)
    parser.add_argument("-t", "--temporary", action = "store_true", dest = "temporary")
    # -h is repurposed for hardware breakpoints; add_help is off above.
    parser.add_argument("-h", "--hardware", action = "store_true", dest = "hw")
    parser.add_argument("-d", "--disabled", action = "store_true", dest = "disabled")
    parser.add_argument("-a", "--after", type = int, dest = "after")
    parser.add_argument("-p", "--probe", choices = ["generic", "stab"], dest = "probe", help = "Generic or SystemTap probe")
    parser.add_argument("location", nargs='?')
    # TODO add these back when we have optional subcommands working.
    #subparsers = parser.add_subparsers()
    #if_parser = subparsers.add_parser("if", add_help = False, help = "if CONDITION")
    #if_parser.add_argument("condition")
    #thread_parser = subparsers.add_parser("thread", add_help = False, help = "thread TID")
    #thread_parser.add_argument("tid", type = int)
    if getSynopsis:
        # do_help() calls with getSynopsis=True to fetch the usage text.
        return parser.format_help()
    args = parser.parse_args(args.split())
    results = self.gdb._breakpoints.breakpointCreate(**vars(args))
def do_info_breakpoints(self, args):
    # Print breakpoints in a gdb-like "info breakpoints" table.
    # (No docstring on purpose: do_help() treats do_* docstrings as
    # structured data with a fixed layout.)
    results = self.gdb._breakpoints.list(args)
    if not len(results):
        return
    #
    # Print rows.
    #
    fmt = "{:<7} {:<14} {:<4} {:<3} {}"
    self._out(fmt.format("Num", "Type", "Disp", "Enb", "Where"))
    for u in results:
        try:
            u = u[u'bkpt']
            # Prefer full path, then file, then original location, then "at".
            try:
                location = u["fullname"]
            except KeyError:
                try:
                    location = u["file"]
                except KeyError:
                    try:
                        location = u["original-location"]
                    except KeyError:
                        location = u["at"]
            # NOTE(review): type/disp are blanked before printing, so those
            # columns always show empty -- presumably placeholders; confirm.
            u["type"] = "";
            u["disp"] = "";
            try:
                addr = u["addr"]
            except KeyError:
                addr = 0
            try:
                func = u["func"]
                line = u["line"]
            except KeyError:
                func = ""
                line = 0
            location = "{} {} at {}:{}".format(addr, func, location, line)
            self._out(fmt.format(u["number"], u["type"], u["disp"], u["enabled"], location))
            try:
                times = u["times"]
                if times != "0":
                    self._out(" breakpoint already hit {} times".format(times))
            except KeyError:
                pass
        except KeyError:
            #
            # Not a standalone breakpoint, just an overload of one.
            #
            location = "{} {}".format(u["addr"], u["at"])
            self._out(fmt.format(u["number"], "", "", u["enabled"], location))
###################
## Data commands ##
###################
def do_call(self, args, getSynopsis = False):
    # Evaluate an expression for its side effects.
    parser = MyArgs(prog = "call", add_help = False)
    parser.add_argument("expr")
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    # TODO assign to local var
    # NOTE(review): "evalute" is spelled this way at every call site in this
    # file, so presumably it matches the qgdb API -- verify before renaming.
    self.gdb._data.evalute(**vars(args))
def do_disassemble(self, args, getSynopsis = False):
    parser = MyArgs(prog = "disassemble", add_help = False)
    parser.add_argument("-s", "--start-addr", type = int)
    parser.add_argument("-e", "--end-addr", type = int)
    parser.add_argument("-f", "--filename")
    parser.add_argument("-l", "--linenum", type = int)
    parser.add_argument("-n", "--lines", type = int)
    # ["disassembly_only", "with_source", "with_opcodes", "all"]
    parser.add_argument("mode", type = int, choices = [ 0, 1, 2, 3 ])
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    result = self.gdb._data.disassemble(**vars(args))
    for u in result:
        # NOTE(review): _out is called with two args here but one arg in
        # most places -- printLine must accept varargs; confirm.
        self._out(u[u'address'], u[u'inst'])
def do_output(self, args, getSynopsis = False):
    parser = MyArgs(prog = "output", add_help = False)
    parser.add_argument("expr")
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    self.gdb._data.evalute(**vars(args))
# NOTE(review): dead code -- this do_print is shadowed by the second
# definition of do_print further down the class body.
def do_print(self, args, getSynopsis = False):
    parser = MyArgs(prog = "print", add_help = False)
    parser.add_argument("expr")
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    # TODO assign to local var
    self.gdb._data.evalute(**vars(args))
def do_print(self, args):
    """
    data
    NAME
        print -- Print value of expression EXP
    SYNOPSIS
        print EXP
    DESCRIPTION
        EXP can be any of:
        - Inferior variables of the lexical environment of the selected
        stack frame, plus all those whose scope is global or an entire file.
        - $NUM gets previous value number NUM. $ and $$ are the last two
        values. $$NUM refers to NUM'th value back from the last one.
        - Names starting with $ refer to registers (with the values they
        would have if the program were to return to the stack frame now
        selected, restoring all registers saved by frames farther in) or
        else to ...
        - GDB "convenience" variables. Use assignment expressions to give
        values to convenience variables.
        - {TYPE}ADREXP refers to a datum of data type TYPE, located at address
        ADREXP. @ is a binary operator for treating consecutive data objects
        anywhere in memory as an array. FOO@NUM gives an array whose first
        element is FOO, whose second element is stored in the space following
        where FOO is stored, etc. FOO must be an expression whose value
        resides in memory.
        - Python expressions. In case of ambiguity between an inferior
        variable and a python variable, use the "gdb print" or "py print"
        commands.
        EXP may be preceded with /FMT, where FMT is a format letter
        but no count or size letter (see "x" command).
    EXAMPLES
        print main+1 Print inferior expression.
        print $1 Print previous value.
        print $getenv("HOME") Print convenience function
        print gdb.PYTHONDIR Print Python expression
    """
    # This second definition wins over the earlier do_print above.
    try:
        #
        # Assume its an object known to GDB.
        #
        # NOTE(review): do_gdb() takes no name_errors parameter -- this call
        # raises TypeError, not NameError; see the note on do_gdb.
        self.do_gdb("print " + args, name_errors = True)
    except NameError as e:
        #
        # Try a Python variable.
        #
        try:
            self._out(eval(args))
        except NameError as f:
            self._out("No GDB" + str(e)[2:-1] + ", and Python " + str(f))
def do_info_registers(self, args, getSynopsis = False):
    # Print the (optionally filtered) register values, one per row.
    parser = MyArgs(prog = "info registers", add_help = False)
    parser.add_argument("regName", nargs = "?")
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    # TODO assign to local var
    results = self.gdb._data.listRegisterValues(**vars(args))
    #
    # Print rows.
    #
    for u in results:
        self._out(u[u'name'], u[u'value'])
# NOTE(review): identical to do_info_registers apart from the prog string;
# kept separate because prog feeds the generated synopsis text.
def do_info_all__registers(self, args, getSynopsis = False):
    parser = MyArgs(prog = "info all-registers", add_help = False)
    parser.add_argument("regName", nargs = "?")
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    # TODO assign to local var
    results = self.gdb._data.listRegisterValues(**vars(args))
    #
    # Print rows.
    #
    for u in results:
        self._out(u[u'name'], u[u'value'])
def do_x(self, args, getSynopsis = False):
    # Examine memory, gdb "x"-style: address, per-word format/size, and a
    # rows x cols layout; aschar is the substitute for unprintable bytes.
    parser = MyArgs(prog = "x", add_help = False)
    parser.add_argument("address", type = int)
    parser.add_argument("word_format", choices = ["x", "d", "u", "o", "t", "a", "c", "f"])
    parser.add_argument("word_size", type = int)
    parser.add_argument("nr_rows", type = int)
    parser.add_argument("nr_cols", type = int)
    parser.add_argument("aschar", nargs="?", default = ".")
    parser.add_argument("-o", "--offset-bytes", type = int)
    if getSynopsis:
        return parser.format_help()
    args = parser.parse_args(args.split())
    # TODO assign to local var
    results = self.gdb._data.readMemory(**vars(args))
    for u in results:
        self._out(u[u'addr'], u[u'data'])
#####################
## Program control ##
#####################
# NOTE: the do_* docstrings in this section are parsed positionally by
# do_help(); preserve their exact layout.
def do_advance(self, args):
    """
    running
    NAME
        advance -- Continue the program up to the given location (same form as args for break command)
    SYNOPSIS
        advance [PROBE_MODIFIER] [LOCATION] [thread THREADNUM] [if CONDITION]
    DESCRIPTION
        Continue the program up to the given location (same form as args for break command).
        Execution will also stop upon exit from the current stack frame.
    """
    # Runs the inferior, so goes through the async console wrapper.
    self.asyncWrapper("advance", args)
def do_continue(self, args):
    """
    running
    NAME
        continue -- Continue program being debugged
    SYNOPSIS
        continue [N|-a]
    DESCRIPTION
        Continue program being debugged, after signal or breakpoint.
        If proceeding from breakpoint, a number N may be used as an argument,
        which means to set the ignore count of that breakpoint to N - 1 (so that
        the breakpoint won't break until the Nth time it is reached).
        If non-stop mode is enabled, continue only the current thread,
        otherwise all the threads in the program are continued. To
        continue all stopped threads in non-stop mode, use the -a option.
        Specifying -a and an ignore count simultaneously is an error.
    """
    self.gdb.miCommandExec("-exec-continue", args)
# NOTE: do_* docstrings are parsed positionally by do_help(); preserve
# their exact layout.
def do_finish(self, args):
    """
    running
    NAME
        finish -- Execute until selected stack frame returns
    SYNOPSIS
        finish
    DESCRIPTION
        Execute until selected stack frame returns.
        Upon return, the value returned is printed and put in the value history.
    """
    self.gdb.miCommandExec("-exec-finish", args)
def do_interrupt(self, args):
    # Interrupt the running inferior (MI -exec-interrupt).
    self.gdb.miCommandExec("-exec-interrupt", args)
def do_jump(self, args):
    """
    running
    NAME
        jump -- Continue program being debugged at specified line or address
    SYNOPSIS
        jump LINENUM|*ADDR
    DESCRIPTION
        Continue program being debugged at specified line or address.
        Give as argument either LINENUM or *ADDR, where ADDR is an expression
        for an address to start at.
    """
    self.asyncWrapper("jump", args)
def do_kill(self, args):
    # Abort the inferior (MI -exec-abort).
    self.gdb.miCommandExec("-exec-abort", args)
# NOTE: do_* docstrings are parsed positionally by do_help(); preserve
# their exact layout.
def do_next(self, args):
    """
    running
    NAME
        next -- Step program
    SYNOPSIS
        next [N]
    DESCRIPTION
        Step program, proceeding through subroutine calls.
        Like the "step" command as long as subroutine calls do not happen;
        when they do, the call is treated as one instruction.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.gdb.miCommandExec("-exec-next", args)
def do_nexti(self, args):
    """
    running
    NAME
        nexti -- Step one instruction
    SYNOPSIS
        nexti [N]
    DESCRIPTION
        Step one instruction, but proceed through subroutine calls.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.gdb.miCommandExec("-exec-next-instruction", args)
def do_return(self, args):
    # Force return from the current frame (MI -exec-return).
    self.gdb.miCommandExec("-exec-return", args)
def do_reverse_continue(self, args):
    """
    running
    NAME
        reverse-continue -- Continue program being debugged but run it in reverse
    SYNOPSIS
        reverse-continue [N]
    DESCRIPTION
        Continue program being debugged but run it in reverse.
        If proceeding from breakpoint, a number N may be used as an argument,
        which means to set the ignore count of that breakpoint to N - 1 (so that
        the breakpoint won't break until the Nth time it is reached).
    """
    self.asyncWrapper("reverse-continue", args)
def do_reverse_finish(self, args):
    """
    running
    NAME
        reverse-finish -- Execute backward until just before selected stack frame is called
    SYNOPSIS
        reverse-finish
    DESCRIPTION
        Execute backward until just before selected stack frame is called.
    """
    self.asyncWrapper("reverse-finish", args)
# NOTE: do_* docstrings are parsed positionally by do_help(); preserve
# their exact layout.
def do_reverse_next(self, args):
    """
    running
    NAME
        reverse-next -- Step program backward
    SYNOPSIS
        reverse-next [N]
    DESCRIPTION
        Step program backward, proceeding through subroutine calls.
        Like the "reverse-step" command as long as subroutine calls do not happen;
        when they do, the call is treated as one instruction.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.asyncWrapper("reverse-next", args)
def do_reverse_nexti(self, args):
    """
    running
    NAME
        reverse-nexti -- Step backward one instruction
    SYNOPSIS
        reverse-nexti [N]
    DESCRIPTION
        Step backward one instruction, but proceed through called subroutines.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.asyncWrapper("reverse-nexti", args)
def do_reverse_step(self, args):
    """
    running
    NAME
        reverse-step -- Step program backward until it reaches the beginning of another source line
    SYNOPSIS
        reverse-step [N]
    DESCRIPTION
        Step program backward until it reaches the beginning of another source line.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.asyncWrapper("reverse-step", args)
def do_reverse_stepi(self, args):
    """
    running
    NAME
        reverse-stepi -- Step backward exactly one instruction
    SYNOPSIS
        reverse-stepi [N]
    DESCRIPTION
        Step backward exactly one instruction.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.asyncWrapper("reverse-stepi", args)
# NOTE: do_* docstrings are parsed positionally by do_help(); preserve
# their exact layout.
def do_run(self, args):
    """
    running
    NAME
        run -- Start debugged program
    SYNOPSIS
        run [ARGS]
    DESCRIPTION
        Start debugged program. You may specify arguments to give it.
        Args may include "*", or "[...]"; they are expanded using "sh".
        Input and output redirection with ">", "<", or ">>" are also allowed.
        With no arguments, uses arguments last specified (with "run" or "set args").
        To cancel previous arguments and run with no arguments,
        use "set args" without arguments.
    """
    # Route the inferior's terminal through whatever startIoThread()
    # returns before launching -- presumably a pty name; confirm in qgdb.
    tty = self.gdb.startIoThread()
    self.gdb.miCommandOne("-inferior-tty-set {}".format(tty))
    if args:
        self.do_set_args(args)
    self.gdb.miCommandExec("-exec-run", args)
def do_set_args(self, args):
    self.gdb.miCommandExec("-exec-arguments", args)
def do_show_args(self, args):
    self.gdb.miCommandExec("-exec-show-arguments", args)
def do_signal(self, args):
    """
    running
    NAME
        signal -- Continue program giving it signal specified by the argument
    SYNOPSIS
        signal N
    DESCRIPTION
        Continue program giving it signal specified by the argument.
        An argument of "0" means continue program without giving it a signal.
    """
    self.asyncWrapper("signal", args)
# NOTE: do_* docstrings are parsed positionally by do_help(); preserve
# their exact layout.
def do_start(self, args):
    """
    running
    NAME
        start -- Run the debugged program until the beginning of the main procedure
    SYNOPSIS
        start [ARGS]
    DESCRIPTION
        Run the debugged program until the beginning of the main procedure.
        You may specify arguments to give to your program, just as with the
        "run" command.
    """
    # Temporary breakpoint at main; "pending" in the result means the
    # location could not be resolved, so undo and report.
    results = self.gdb._breakpoints.breakpointCreate("main", temporary = True)
    if "pending" in results:
        results = self.gdb._breakpoints.breakpointDelete(results["number"])
        self._out("Cannot set breakpoint at 'main'")
        return
    self.do_run(args)
def do_step(self, args):
    """
    running
    NAME
        step -- Step program until it reaches a different source line
    SYNOPSIS
        step [N]
    DESCRIPTION
        Step program until it reaches a different source line.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.gdb.miCommandExec("-exec-step", args)
def do_stepi(self, args):
    """
    running
    NAME
        stepi -- Step one instruction exactly
    SYNOPSIS
        stepi [N]
    DESCRIPTION
        Step one instruction exactly.
        Argument N means do this N times (or till program stops for another reason).
    """
    self.gdb.miCommandExec("-exec-step-instruction", args)
def do_until(self, args):
    """
    running
    NAME
        until -- Execute until the program reaches a source line greater than the current
    SYNOPSIS
        until [PROBE_MODIFIER] [LOCATION] [thread THREADNUM] [if CONDITION]
    DESCRIPTION
        Execute until the program reaches a source line greater than the current
        or a specified location (same args as break command) within the current frame.
    """
    self.gdb.miCommandExec("-exec-until", args)
def do_info_source(self, args):
    # Describe the current source file, preferring the full path when known.
    u = self.gdb._programControl.currentSource()
    self._out("Current source file is {}:{}".format(u["file"], u[u'line']))
    try:
        file = u["fullname"]
    except KeyError:
        file = u["file"]
    self._out("Located in {}".format(file))
    # macro-info is a string flag ("0"/"1") in the MI result.
    if u[u'macro-info'] != "0":
        self._out("Does include preprocessor macro info.")
    else:
        self._out("Does not include preprocessor macro info.")
def do_info_sources(self, args):
    # List every known source file, one per line.
    results = self.gdb._programControl.allSources()
    for u in results:
        try:
            file = u["fullname"]
        except KeyError:
            file = u["file"]
        self._out(file)
def do_info_files(self, args):
    #self.gdb._programControl.execSections()
    self.gdb._programControl.symbolFiles()
def do_info_target(self, args):
    # Alias for "info files".
    self.do_info_files(args)
def do_file(self, filename):
    self.gdb._programControl.setExecAndSymbols(filename)
#def do_exec_file(self, filename):
#    self.gdb._programControl.setExecOnly(filename)
#def do_symbol_file(self, filename):
#    self.gdb._programControl.setSymbolsOnly(filename)
####################
## Stack commands ##
####################
def do_bt(self, args):
    # Print a gdb-style backtrace for thread 1, one line per frame.
    # (No docstring on purpose: do_help() treats do_* docstrings as data.)
    for entry in self.gdb._stack.stackFrames(1):
        frame = entry[u'frame']
        header = "#{} {} in {} ()".format(frame["level"], frame["addr"], frame["func"])
        origin = None
        try:
            # Shared-library frames carry "from"; source frames carry a
            # file (preferably the full path) plus a line number.
            origin = frame["from"]
        except KeyError:
            for key in ("fullname", "file"):
                try:
                    origin = frame[key] + ":" + frame["line"]
                    break
                except KeyError:
                    pass
        if origin is None:
            self._out(header)
        else:
            self._out("{} from {}".format(header, origin))
def do_backtrace(self, args):
    # Alias for "bt".
    self.do_bt(args)
def do_where(self, args):
    # Alias for "bt".
    self.do_bt(args)
#def do_depth(self, tid, maxFrames = None):
def do_frame(self, args):
    # NOTE(review): this looks inverted -- with NO args it forwards args,
    # with args it passes the constant tuple (1, 3); do_info_frame ignores
    # its parameter either way.  Confirm the intended behavior.
    if not args:
        self.do_info_frame(args)
    else:
        self.do_info_frame((1, 3))
def do_info_frame(self, args):
    # Frame info for thread 1; the args parameter is unused.
    u = self.gdb._stack.frameInfo(1)
    self._out("#{} {} in {} () from {}".format(u["level"], u["addr"], u["func"], u["from"]))
def do_info_locals(self, args):
    #self.gdb._stack.stackArguments(1, 1)
    # Dump frame variables; fall back to shorter formats as keys go missing.
    results = self.gdb._stack.frameVariables(1, 1, 8)
    for u in results:
        try:
            self._out("arg {} {} = {} = {}".format(u["arg"], u["name"], u["type"], u["value"]))
        except KeyError:
            try:
                self._out("{} = {} = {}".format(u["name"], u["type"], u["value"]))
            except KeyError:
                self._out("{} = {}".format(u["name"], u["value"]))
#####################
## Target commands ##
#####################
#'-target-attach'
#'-target-compare-sections'
#'-target-detach'
#'-target-disconnect'
#'-target-download'
#'-target-exec-status'
#'-target-list-available-targets'
#'-target-list-current-targets'
#'-target-list-parameters'
#'-target-list-parameters'
######################
## Thread commands ##
#####################
#'-thread-select'
def do_info_threads(self, args):
    # Print the thread table, gdb-style; "*" marks the current thread.
    currentThread, results = self.gdb._threads.list(args)
    if not len(results):
        return
    #
    # Print rows.
    #
    fmt = "{:<1} {:<4} {:<37} {}"
    self._out(fmt.format(" ", "Id", "Target Id", "Where"))
    for v in results:
        if currentThread == v["id"]:
            active = "*"
        else:
            active = " "
        frame = v["frame"]
        # NOTE(review): 'args' (the method parameter) is reused here as the
        # formatted frame-argument string -- harmless but confusing.
        args = frame["args"]
        args = ", ".join(["{}={}".format(d["name"], d["value"]) for d in args])
        try:
            location = frame["fullname"]
        except KeyError:
            try:
                location = frame["file"]
            except KeyError:
                location = frame["from"]
        try:
            line = frame["line"]
        except KeyError:
            line = ""
        location = "{}: {}({}) at {}:{}".format(frame["addr"], frame["func"], args, location, line)
        name = v["name"]
        if name:
            name += ", "
        else:
            name = ""
        self._out(fmt.format(active, v["id"], name + v["target-id"], location))
######################
## General commands ##
######################
#'-enable-timings'
#'-environment-cd'
#'-environment-directory'
#'-environment-path'
#'-environment-pwd'
#'-gdb-exit'
#'-gdb-set'
#'-gdb-show'
#'-gdb-version'
#'-inferior-tty-set'
#'-inferior-tty-show'
#'-interpreter-exec'
#'-list-features'
def do_apropos(self, args):
    """
    support
    NAME
        apropos -- Search for commands matching a REGEXP
    SYNOPSIS
        apropos REGEXP
    DESCRIPTION
        Type "apropos word" to search for commands related to "word".
    """
    def printAproposEntry(regexp, arg, indentation, prefix, keyword, apropos, clazz, function):
        """Dump the contents of the database as help text.
        Only leaf items which match the given regexp are emitted.
        """
        if regexp.search(keyword) or regexp.search(apropos):
            self._out("\t" + prefix + keyword + " -- " + apropos)
    #
    # We emit our help database, so that we can override GDB if needed.
    #
    if args == "":
        self._out("REGEXP string is empty")
        return
    self._out("LIST OF COMMANDS MATCHING '" + args + "'")
    self.commandDb.walk(printAproposEntry, re.compile(args, re.IGNORECASE), None, "\t")
    self._out("")
# Returning True from a cmd.Cmd do_* handler terminates the command loop.
def do_EOF(self, args):
    """
    alias
    NAME
        <Ctrl-D> -- Exit GDB.
    SYNOPSIS
        <Ctrl-D>
    DESCRIPTION
        Shortcut for "quit".
    """
    return True
def do_quit(self, args):
    """
    support
    NAME
        quit -- Exit GDB.
    SYNOPSIS
        quit
    DESCRIPTION
        Exit the interpreter. Shortcut: <Ctrl-D>
    """
    return True
def do_gdb(self, args):
    """
    support
    NAME
        gdb -- Execute a GDB command directly.
    SYNOPSIS
        gdb NATIVE-GDB-COMMAND
    DESCRIPTION
        The command is executed directly, bypassing any overrides in this wrapper.
    EXAMPLES
        gdb help Get GDB's native help.
    """
    # NOTE(review): do_print (second definition) calls this as
    # do_gdb("print " + args, name_errors = True); this signature has no
    # name_errors parameter, so that path raises TypeError -- either a
    # keyword was dropped here or do_print is stale; confirm.
    results = self.gdb.consoleCommand(args, True)
    for line in results:
        self._out(line)
def do_help(self, args):
    """
    support
    NAME
        help -- Print list of commands
    SYNOPSIS
        help [COMMAND|COMMAND-CLASS]
    DESCRIPTION
        Type "help" followed by a class name for a list of commands in that class.
        Type "help all" for the list of all commands.
        Type "help" followed by command name for full documentation.
        Type "apropos word" to search for commands related to "word".
        Command name abbreviations are allowed if unambiguous.
    """
    def printManHeader(command, apropos, synopsis, description):
        # Emit a man-page-style NAME/SYNOPSIS header via self._out.
        if apropos:
            self._out("NAME\n\t" + command + " -- " + apropos)
        else:
            self._out("NAME\n\t" + command)
        if synopsis:
            self._out("\nSYNOPSIS\n\t" + synopsis.replace("\n", "\n\t"))
        if description:
            self._out("\n" + description)
    def printClassHelp(keyword):
        #
        # Now check if the user asked for class-based help.
        #
        if keyword == "all":
            #
            # We emit our help database, so that we can override GDB if needed.
            #
            self._out("LIST OF COMMANDS")
            self.commandDb.walk(printAproposEntry, "", None, "\t")
            self._out("")
            return True
        else:
            classes = [name for name in self.commandDb.classes_db if name.startswith(keyword)]
            if len(classes) == 1:
                #
                # Emit GDB help for the class.
                #
                # NOTE(review): two-value unpack of consoleCommand() here,
                # but do_gdb() iterates its result directly -- the two
                # usages disagree; confirm the qgdb API.
                error, helpText = self.gdb.consoleCommand("help " + classes[0], True)
                apropos = helpText[0]
                synopsis = None
                for i in range(1, len(helpText)):
                    if helpText[i] == "":
                        #
                        # Skip the "List of commands"
                        #
                        helpText = helpText[i + 1:]
                        break
                    if synopsis:
                        synopsis = "\n\t".join((synopsis, helpText[i]))
                    else:
                        synopsis = helpText[i]
                printManHeader(classes[0], apropos, synopsis, "LIST OF COMMANDS")
                for line in helpText[2:]:
                    self._out("\t" + line)
                return True
            elif len(classes) > 1:
                message = "Ambiguous keyword: help"
                # NOTE(review): 'keywords' is a closure over a variable
                # assigned later in do_help's body; valid only because this
                # helper is invoked after that assignment.
                self._out(" ".join((message, keywords[0], str(sorted(classes)))))
                self._out("^".rjust(len(message) + 2))
                return True
        return False
    def printAproposEntry(clazzPrefix, arg, indentation, prefix, keyword, apropos, clazz, function):
        """Dump the contents of the database as help text.
        Only leaf items which match the given classification prefix are emitted.
        """
        if clazz.startswith(clazzPrefix) :
            self._out(indentation + keyword + " -- " + apropos)
    keywords = args.split()
    if (keywords):
        #
        # First try to find command-specific help.
        #
        (matched, unmatched, completions, lastMatchedEntry) = self.commandDb.lookup(args)
        if unmatched:
            if isinstance(completions, dict):
                if printClassHelp(keywords[0]):
                    return
                #
                # It was not a class-based request for help...
                #
                message = " ".join(("Keyword not found: help", matched)).rstrip()
                self._out(" ".join((message, unmatched, str(sorted(completions.keys())))))
                self._out("^".rjust(len(message) + 2))
            else:
                message = " ".join(("Ambiguous keyword: help", matched)).rstrip()
                self._out(" ".join((message, unmatched, str(sorted(completions)))))
                self._out("^".rjust(len(message) + 2))
            return
        #
        # We got a match!
        #
        (oldApropos, oldLevel, oldClazz, oldFunction) = completions
        if oldFunction and oldFunction.__doc__:
            #
            # Emit help for our implementation if we have it.
            # The do_* docstrings are treated as structured data: line 6
            # is expected to hold the synopsis.  This is why their layout
            # must never be reflowed.
            #
            helpText = oldFunction.__doc__.split("\n")
            synopsis = helpText[6].lstrip()
            if synopsis.startswith(matched):
                helpText = [line[2:] for line in helpText[11:]]
            else:
                helpText = [line[2:] for line in helpText[8:]]
                synopsis = matched
        else:
            #
            # Emit help for the GDB implementation.
            #
            error, helpText = self.gdb.consoleCommand("help " + matched, True)
            if len(helpText) > 1 and (helpText[1].startswith(matched) or helpText[1].startswith("Usage:")):
                synopsis = helpText[1]
                helpText = ["\t" + line for line in helpText[2:]]
            elif len(helpText) > 2 and (helpText[2].startswith(matched) or helpText[2].startswith("Usage:")):
                synopsis = helpText[2]
                helpText = ["\t" + line for line in helpText[3:]]
            else:
                helpText = ["\t" + line for line in helpText]
                synopsis = matched
        #
        # If we have a dynamically generated synopsis, use it.
        #
        try:
            synopsis = oldFunction(None, getSynopsis = True)
            synopsis = synopsis[:-1]
        except TypeError:
            pass
        printManHeader(matched, oldApropos, synopsis, "DESCRIPTION")
        for line in helpText:
            self._out(line)
    else:
        #
        # Emit summary help from GDB.
        #
        helpText = self.gdb.consoleCommand("help", True)
        self._out("LIST OF CLASSES OF COMMANDS")
        for line in helpText[2:]:
            self._out("\t" + line)
# Class-level default; the instance attribute is created lazily below.
pythonShell = None
def do_python(self, args):
    # Hand control to an IPython console connected to GDB's embedded
    # Python kernel; the shell is created lazily on first use.
    print("do_python(), calling enter", self.pythonShell)
    connectionFile = self.gdb._python.enter(args)
    if not self.pythonShell:
        self.pythonShell = IPythonConsoleShell(connection_file = connectionFile)
    self.pythonShell.interact()
    print("do_python(), pythonShell.interact done!")
    self.pythonShell.stop()
    self.gdb._python.exit()
    # Deleting the instance attribute exposes the class-level None again,
    # so the next do_python() builds a fresh shell.
    del self.pythonShell
#################################
## Fallthrough command handler ##
#################################
def default(self, args):
    """
    Default command handler, for all commands not matched by a hand-crafted
    do_xxx() handler, and any special handlers.
    """
    def getenv(name):
        """Read *name* from the live libc environment.

        Unlike os.environ (a snapshot taken at interpreter start-up), this
        sees variables set later in the process environment.
        """
        # Bug fix: the old import named "cChar_p" and "stringAt", neither of
        # which exists in ctypes (they are c_char_p / string_at), so this
        # helper always raised ImportError when first called.
        from ctypes import CDLL, c_char_p
        libc = CDLL("libc.so.6")
        libc.getenv.argtypes = [c_char_p]
        libc.getenv.restype = c_char_p
        # c_char_p takes/returns bytes; callers splice the result into str.
        value = libc.getenv(name.encode())
        return value.decode() if value is not None else None

    def expandEnvironmentVariables(line):
        """
        Expand any environment variables, i.e. $FOO or ${FOO}, in line.
        """
        regexp = re.compile(r"\${(\w+)}|\$(\w+)")
        match = regexp.search(line)
        while match:
            #
            # Extract the name of the environment variable ($FOO and ${FOO}
            # land in different capture groups).
            #
            envVar = match.group(1)
            if not envVar:
                envVar = match.group(2)
            #
            # Substitute value; unset variables expand to "".
            #
            envVar = getenv(envVar)
            if not envVar:
                envVar = ""
            line = line[:match.start()] + envVar + line[match.end():]
            #
            # No recursive resolution for us, so continue from after the
            # substitution...
            #
            match = regexp.search(line, match.start() + len(envVar))
        return line
    #
    # Did we get a command?
    #
    (matched, unmatched, completions, lastMatchedEntry) = self.commandDb.lookup(args)
    if isinstance(completions, list):
        self._out("Ambiguous command \"{}\": {}.".format(unmatched, ", ".join(completions)))
        return
    elif isinstance(completions, tuple) and completions[1]:
        # A matched prefix with pending subcommands: show what can follow.
        subcommands = completions[1]
        self._out("\"{}\" must be followed by the name of an {} command.\nList of {} subcommands:\n".format(matched, matched, matched))
        for k in sorted(subcommands.keys()):
            self._out("{} {} -- {}".format(matched, k, subcommands[k][0]))
        return
    #
    # Extract the arguments (everything after the matched keyword(s)).
    #
    matchedFrags = matched.count(" ") + 1
    frags = args.split(None, matchedFrags)
    if matchedFrags >= len(frags):
        args = ""
    else:
        args = frags[matchedFrags]
    if matched in self.filesCommands:
        dbg0("is files command {}", matched)
        #
        # The command takes files/paths, so expand any embedded environment
        # variables. Bug fix: the old code did " ".join(<str>), which
        # inserted a space between every character of the expanded string.
        #
        args = expandEnvironmentVariables(args)
    try:
        func = getattr(self, "do_" + "_".join(matched.split()))
    except AttributeError:
        #
        # Invoke GDB...
        #
        self.do_gdb(args)
    else:
        func(args)
def complete(self, text, state):
    """Use the command database to provide completions."""
    # Only `completions` is used below; the other lookup fields are ignored.
    matchedKeywords, unmatchedKeyword, completions, lastMatchedEntry = self.commandDb.lookup(text)
    #self.stdout.write("=={}==\n".format((matched, unmatched, completions, lastMatchedEntry)))
    # Echo the candidates, then redraw the prompt with the pending input.
    self.stdout.write("\n{}\n{}{}".format("\t".join(completions), self.prompt, text))
    # NOTE(review): readline's complete(text, state) protocol expects the
    # state-th match as a string (or None); returning the whole container
    # looks suspect — confirm against the readline/cmd integration in use.
    return completions
def completedefault(self, *ignored):
    """Debug stub: record that the default completion hook was invoked."""
    self.stdout.write("completedefault %s" % (ignored,))
def completenames(self, text, *ignored):
    """Debug stub: record that command-name completion was invoked."""
    self.stdout.write("completenames %s %s" % (text, ignored))
if __name__ == "__main__":
    import sys

    # Ad-hoc smoke test: drive the CLI against a known binary by hand.
    # NOTE(review): Test never calls super().__init__() on QObject, and the
    # QCoreApplication is only created *after* all the gdb work has run in
    # __init__ — acceptable for a throwaway harness, but verify if this is
    # ever promoted to a real test.
    class Test(QObject):
        def __init__(self, parent = None):
            gdb = Cli(["gdb"], print)
            gdb.do_file("/usr/local/bin/kate")
            gdb.do_start(None)
            gdb.do_break("QWidget::QWidget")
            gdb.do_info_breakpoints(None)
            gdb.do_continue(None)
            gdb.do_x("140737488346128 x 4 8 2") # 0x7fffffffdc10
            gdb.do_disassemble("-s 140737488346128 -e 140737488346140 0") # 0x7fffffffdc10
            # Hand control to the interactive command loop.
            gdb.cmdloop()
    app = QCoreApplication(sys.argv)
    foo = Test()
    #sys.exit(app.exec_())
| lgpl-2.1 |
sposs/DIRAC | Core/Utilities/Graphs/Legend.py | 11 | 7713 | ########################################################################
# $HeadURL$
########################################################################
""" Legend encapsulates a graphical plot legend drawing tool
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
from DIRAC.Core.Utilities.Graphs.Palette import Palette
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import types
class Legend:
    """Graphical plot legend drawing tool (DIRAC Graphs).

    NOTE(review): this module is Python 2 (print statement, types.DictType);
    the py2 idioms below, including integer division in the row/column math,
    are preserved byte-for-byte.
    """

    def __init__(self,data=None,axes=None,*aw,**kw):
        # Accept either {label: {x: value}} dicts, a GraphData instance, or a
        # pre-built label sequence.
        self.labels = {}
        if type(data) == types.DictType:
            for label,ddict in data.items():
                #self.labels[label] = pretty_float(max([ float(x) for x in ddict.values() if x ]) )
                self.labels[label] = "%.1f" % max([ float(x) for x in ddict.values() if x ])
        elif type(data) == types.InstanceType and data.__class__ == GraphData:
            self.labels = data.getLabels()
        else:
            self.labels = data
        #self.labels.reverse()
        self.ax = axes
        self.canvas = None
        if self.ax:
            self.canvas = self.ax.figure.canvas
            self.ax.set_axis_off()
        self.prefs = evalPrefs(*aw,**kw)
        self.palette = Palette()
        # NOTE(review): this indexing assumes labels is a sequence of
        # (label, value) pairs — it would fail for the dict branch above;
        # presumably GraphData/sequence input is the real use. Confirm.
        if self.labels and self.labels[0][0] != 'NoLabels':
            percent_flag = self.prefs.get('legend_unit','')
            if percent_flag == "%":
                # Convert absolute values to percentages of the total.
                sum_value = sum(data.label_values)
                if sum_value > 0.:
                    self.labels = [(l,v/sum_value*100.) for l,v in self.labels ]
        self.__get_column_width()

    def dumpPrefs(self):
        # Debug helper: dump the evaluated preferences (Python 2 print).
        for key in self.prefs:
            print key.rjust(20),':',str(self.prefs[key]).ljust(40)

    def setLabels(self,labels):
        self.labels = labels

    def setAxes(self,axes):
        # Attach the legend to a matplotlib axes and hide its frame/ticks.
        self.ax = axes
        self.canvas = self.ax.figure.canvas
        self.ax.set_axis_off()

    def getLegendSize(self):
        """Compute (width, height, max_height) in pixels for the legend area.

        NOTE(review): if legend_position is neither left/right nor 'bottom',
        legend_max_height is never assigned and this raises NameError —
        presumably only those three positions are valid; confirm.
        """
        self.__get_column_width()
        legend_position = self.prefs['legend_position']
        legend_width = float(self.prefs['legend_width'])
        legend_height = float(self.prefs['legend_height'])
        legend_padding = float(self.prefs['legend_padding'])
        legend_text_size = self.prefs.get('legend_text_size',self.prefs['text_size'])
        legend_text_padding = self.prefs.get('legend_text_padding',self.prefs['text_padding'])
        if legend_position in ['right','left']:
            # One column in case of vertical legend
            legend_width = self.column_width+legend_padding
            nLabels = len(self.labels)
            legend_max_height = nLabels*(legend_text_size+legend_text_padding)
        elif legend_position == 'bottom':
            # Horizontal legend: as many columns as fit, rows capped both by
            # preference and by the available height.
            nColumns = min(self.prefs['legend_max_columns'],int(legend_width/self.column_width))
            nLabels = len(self.labels)
            maxRows = self.prefs['legend_max_rows']
            nRows_ax = int(legend_height/1.6/self.prefs['text_size'])
            nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
            nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
            text_padding = self.prefs['text_padding']
            text_padding = pixelToPoint(text_padding,self.prefs['dpi'])
            legend_height = min(legend_height,(nRows*(self.text_size+text_padding)+text_padding))
            legend_max_height = nLabels*(self.text_size+text_padding)
        return legend_width,legend_height,legend_max_height

    def __get_legend_text_size(self):
        # Legend-specific text size/padding, falling back to the global ones.
        dpi = self.prefs['dpi']
        text_size = self.prefs['text_size']
        text_padding = self.prefs['text_padding']
        legend_text_size = self.prefs.get('legend_text_size',text_size)
        legend_text_padding = self.prefs.get('legend_text_padding',text_padding)
        return legend_text_size,legend_text_padding

    def __get_column_width(self):
        """Measure the widest "label value" string and cache column_width."""
        max_length = 0
        max_column_text = ''
        flag = self.prefs.get('legend_numbers',True)
        unit = self.prefs.get('legend_unit',False)
        for label,num in self.labels:
            if not flag: num = None
            if num is not None:
                column_length = len(str(label)+str(num)) + 1
            else:
                column_length = len(str(label)) + 1
            if column_length > max_length:
                # New widest entry: rebuild the sample text used to measure
                # the real rendered width below.
                max_length = column_length
                if flag:
                    if type(num) == types.IntType or type(num) == types.LongType:
                        numString = str(num)
                    else:
                        numString = "%.1f" % float(num)
                    max_column_text = '%s %s' % (str(label),numString)
                    if unit:
                        max_column_text += "%"
                else:
                    max_column_text = '%s ' % str(label)
        # Render the sample text on a throwaway Agg canvas to get its true
        # pixel extent at the configured dpi.
        figure = Figure()
        canvas = FigureCanvasAgg(figure)
        dpi = self.prefs['dpi']
        figure.set_dpi( dpi )
        l_size,l_padding = self.__get_legend_text_size()
        self.text_size = pixelToPoint(l_size,dpi)
        text = Text(0.,0.,text=max_column_text,size=self.text_size)
        text.set_figure(figure)
        bbox = text.get_window_extent(canvas.get_renderer())
        # Extra room for the color box and inter-column spacing.
        self.column_width = bbox.width+6*l_size

    def draw(self):
        """Draw the legend entries (color box + label + value) on self.ax."""
        dpi = self.prefs['dpi']
        ax_xsize = self.ax.get_window_extent().width
        ax_ysize = self.ax.get_window_extent().height
        nLabels = len(self.labels)
        nColumns = min(self.prefs['legend_max_columns'],int(ax_xsize/self.column_width))
        maxRows = self.prefs['legend_max_rows']
        nRows_ax = int(ax_ysize/1.6/self.prefs['text_size'])
        nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
        nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
        maxLabels = nColumns*nRows - 1
        # Work in pixel coordinates: x grows right, y grows downwards (hence
        # the negative y limits and positions).
        self.ax.set_xlim(0.,float(ax_xsize))
        self.ax.set_ylim(-float(ax_ysize),0.)
        legend_text_size,legend_text_padding = self.__get_legend_text_size()
        legend_text_size_point = pixelToPoint(legend_text_size,dpi)
        box_width = legend_text_size
        legend_offset = (ax_xsize - nColumns*self.column_width)/2
        nc = 0
        #self.labels.reverse()
        for label,num in self.labels:
            num_flag = self.prefs.get('legend_numbers',True)
            percent_flag = self.prefs.get('legend_unit','')
            if num_flag:
                if percent_flag == "%":
                    num = "%.1f" % num +'%'
                else:
                    num = "%.1f" % num
            else:
                num = None
            color = self.palette.getColor(label)
            # Fill column-by-column (py2 integer division).
            row = nc%nRows
            column = nc/nRows
            if row == nRows-1 and column == nColumns-1 and nc != nLabels-1:
                # Out of room: summarise the remaining entries and stop.
                last_text = '... plus %d more' % (nLabels-nc)
                self.ax.text(float(column*self.column_width)+legend_offset,-float(row*1.6*box_width),
                             last_text,horizontalalignment='left',
                             verticalalignment='top',size=legend_text_size_point)
                break
            else:
                self.ax.text(float(column*self.column_width)+2.*box_width+legend_offset,-row*1.6*box_width,
                             str(label),horizontalalignment='left',
                             verticalalignment='top',size=legend_text_size_point)
                if num is not None:
                    # Right-align the value at the far edge of the column.
                    self.ax.text(float((column+1)*self.column_width)-2*box_width+legend_offset,-float(row*1.6*box_width),
                                 str(num),horizontalalignment='right',
                                 verticalalignment='top',size=legend_text_size_point)
                # Colored swatch box to the left of the label.
                box = Rectangle((float(column*self.column_width)+legend_offset,-float(row*1.6*box_width)-box_width),
                                box_width,box_width)
                box.set_ec('black')
                box.set_linewidth(pixelToPoint(0.5,dpi))
                box.set_fc(color)
                self.ax.add_patch(box)
            nc += 1
| gpl-3.0 |
gammapy/enrico | enrico/RunGTlike.py | 2 | 12023 | #!/usr/bin/env python
import os,glob,os.path,math
from enrico import utils
from enrico.gtfunction import Observation
from enrico.fitmaker import FitMaker
from enrico.plotting import plot_sed_fromconfig
from enrico import Loggin
import SummedLikelihood
from enrico.xml_model import XmlMaker
from enrico.extern.configobj import ConfigObj
from enrico.utils import hasKey, isKey, typeirfs
# Called per component
def Analysis(folder, config, configgeneric=None, tag="", convtyp='-1', verbose = 1):
    # Build and return a FitMaker for one analysis component, generating the
    # FITS products when the configuration requests it.
    mes = Loggin.Message()
    # NOTE(review): this string sits *after* the first statement, so it is
    # not a real docstring; left in place to avoid any code change.
    """ run an analysis"""
    # If there are no xml files, create it and print a warning <--- This should be here?
    #if len(glob.glob(config['file']['xml'].replace('.xml','*.xml')))==0:
    if len(glob.glob(config['file']['xml']))==0: #.replace('.xml','*.xml')))==0:
        mes.warning("Xml not found, creating one for the given config %s" %config['file']['xml'])
        XmlMaker(config)
    Obs = Observation(folder, config, tag=tag)
    if verbose:
        utils._log('SUMMARY: ' + tag)
        Obs.printSum()
    FitRunner = FitMaker(Obs, config)##Class
    if config['Spectrum']['FitsGeneration'] == 'yes':
        FitRunner.FirstSelection(configgeneric) #Generates fits files for the coarse selection
        FitRunner.GenerateFits() #Generates fits files for the rest of the products
    return FitRunner
# Called once
def GenAnalysisObjects(config, verbose = 1, xmlfile =""):
    """Build the likelihood machinery for all configured analysis components.

    Returns (FitRunner, Fit, ListOfAnalysisObjects): FitRunner is the last
    component's FitMaker (reset to the full energy range), Fit the
    SummedLikelihood over every component, and ListOfAnalysisObjects keeps
    each component (needed to produce the individual residual maps).
    """
    ListOfAnalysisObjects = []
    mes = Loggin.Message()
    # Check whether the summed likelihood method should be used and get the
    # Analysis objects (observation and (Un)BinnedAnalysis objects).
    folder = config['out']
    Fit = SummedLikelihood.SummedLikelihood()
    EUnBinned = config['ComponentAnalysis']['EUnBinned']
    emintotal = float(config['energy']['emin'])
    emaxtotal = float(config['energy']['emax'])
    evtnum = [config["event"]["evtype"]]  # for std analysis
    evtold = evtnum[0]
    # Create one obs instance for each component.
    # The first 3 can be combined with splitting in energy. The 4th tries to
    # mimic the 4FGL catalog analysis.
    if isKey(config['ComponentAnalysis'],'FrontBack') == 'yes':
        evtnum = [1, 2]
        config['analysis']['likelihood'] = "binned"
    elif isKey(config['ComponentAnalysis'],'PSF') == 'yes':
        evtnum = [4, 8, 16, 32]
        config['analysis']['likelihood'] = "binned"
    elif isKey(config['ComponentAnalysis'],'EDISP') == 'yes':
        # Bug fix: EDISP3's event-type bit is 512 (2**9); the old value 521
        # is not a valid Pass 8 evtype bitmask.
        evtnum = [64, 128, 256, 512]
        config['analysis']['likelihood'] = "binned"
    elif isKey(config['ComponentAnalysis'],'FGL4') == 'yes':
        # Special case of the PSF component analysis, where up to 15
        # components (energy+PSF) are created following the 4FGL prescription.
        from enrico.catalogComponents import evtnum, energybins, nbinsbins, zmaxbins, ringwidths, pixelsizes
        config['analysis']['likelihood'] = "binned"
        oldxml = config['file']['xml']
        roi = 0
        # energybins is a dictionary containing an index and a pair of energies.
        for ebin_i in energybins:
            # Restrict the analysis to the specified energy range in all cases.
            if emintotal >= energybins[ebin_i][1]:
                continue
            if emaxtotal <= energybins[ebin_i][0]:
                continue
            # Fix the ROI from the first energy bin actually used.
            if (roi == 0): roi = 2.*ringwidths[ebin_i]+4.
            zmax = zmaxbins[ebin_i]
            nbinsE = nbinsbins[ebin_i]
            energybin = energybins[ebin_i]
            for k,evt in enumerate(evtnum):
                pixel_size = pixelsizes[ebin_i][k]
                # Negative pixel size marks a PSF type excluded in this bin.
                if pixel_size < 0: continue
                tag = "{0}_En{1}".format(typeirfs[evt], ebin_i)
                # Approximation, in the 4FGL the core radius changes from src to src!
                mes.info("Breaking the analysis in bins ~ 4FGL")
                config['event']['evtype'] = evt
                config["file"]["xml"] = oldxml.replace(".xml","_")+typeirfs[evt]+"_"+\
                                        "En{0}.xml".format(ebin_i)
                config["energy"]["emin"] = max(emintotal, energybin[0])
                config["energy"]["emax"] = min(emaxtotal, energybin[1])
                config["analysis"]["likelihood"] = "binned"
                config["analysis"]["ComputeDiffrsp"] = "no"
                config["analysis"]["enumbins_per_decade"] = \
                    int(1.*nbinsE/math.log10(energybin[1]/energybin[0])+0.5)
                config["space"]["rad"] = roi
                config["analysis"]["zmax"] = zmax
                Analyse = Analysis(folder, config,
                                   configgeneric=config,
                                   tag=tag, verbose=verbose)
                ListOfAnalysisObjects.append(Analyse)
                if not(xmlfile == ""): Analyse.obs.xmlfile = xmlfile
                mes.info('Creating Likelihood object for component.')
                Fit_component = Analyse.CreateLikeObject()
                mes.info('Adding component to the summed likelihood.')
                Fit.addComponent(Fit_component)
        # Restore the full energy range / event type before handing the last
        # component back as the reference FitRunner.
        FitRunner = Analyse
        FitRunner.obs.Emin = emintotal
        FitRunner.obs.Emax = emaxtotal
        config["energy"]["emin"] = emintotal
        config["energy"]["emax"] = emaxtotal
        config["event"]["evtype"] = evtold
        FitRunner.config = config
        return FitRunner, Fit, ListOfAnalysisObjects
    # Standard (non-4FGL) analysis components.
    oldxml = config['file']['xml']
    for k,evt in enumerate(evtnum):
        config['event']['evtype'] = evt
        if typeirfs[evt] != "" and typeirfs[evt] != "FRONTBACK":
            config["file"]["xml"] = oldxml.replace(".xml","_"+typeirfs[evt]+".xml")
        if EUnBinned > emintotal and EUnBinned < emaxtotal:
            # Hybrid mode: binned below EUnBinned, unbinned above it.
            mes.info("Breaking the analysis in Binned (low energy) and Unbinned (high energies)")
            analysestorun = ["lowE","highE"]
            for j,TYPE in enumerate(analysestorun):
                tag = TYPE
                if typeirfs[evt] != "": tag += "_"+typeirfs[evt]  # handle name of fits file
                config["file"]["xml"] = oldxml.replace(".xml","_"+tag+".xml")
                # Tune parameters.  Bug fix: compare strings with '==', not
                # 'is' (identity only worked by CPython interning accident).
                if TYPE == "lowE":
                    config['energy']['emin'] = emintotal
                    config['energy']['emax'] = min(config['energy']['emax'], EUnBinned)
                    config['analysis']['likelihood'] = "binned"
                    config['analysis']['ComputeDiffrsp'] = "no"
                elif TYPE == "highE":
                    config['energy']['emin'] = max(config['energy']['emin'], EUnBinned)
                    config['energy']['emax'] = emaxtotal
                    config['analysis']['likelihood'] = "unbinned"
                    config['analysis']['ComputeDiffrsp'] = "yes"
                Analyse = Analysis(folder, config,
                                   configgeneric=config,
                                   tag=tag,
                                   verbose=verbose)
                ListOfAnalysisObjects.append(Analyse)
                mes.info('Creating Likelihood object for component.')
                Fit_component = Analyse.CreateLikeObject()
                mes.info('Adding component to the summed likelihood.')
                Fit.addComponent(Fit_component)
            FitRunner = Analyse
            FitRunner.obs.Emin = emintotal
            FitRunner.obs.Emax = emaxtotal
            config["energy"]["emin"] = emintotal
            config["energy"]["emax"] = emaxtotal
        else:
            # Single component over the full range.
            Analyse = Analysis(folder, config,
                               configgeneric=config,
                               tag=typeirfs[evt], verbose=verbose)
            ListOfAnalysisObjects.append(Analyse)
            if not(xmlfile == ""): Analyse.obs.xmlfile = xmlfile
            mes.info('Creating Likelihood object for component.')
            Fit_component = Analyse.CreateLikeObject()
            mes.info('Adding component to the summed likelihood.')
            Fit.addComponent(Fit_component)
            FitRunner = Analyse
    config["event"]["evtype"] = evtold
    FitRunner.config = config
    return FitRunner, Fit, ListOfAnalysisObjects
def run(infile):
    from enrico import utils
    from enrico import energybin
    from enrico.config import get_config
    from enrico import Loggin
    mes = Loggin.Message()
    # NOTE(review): the string below follows a statement, so it is not a real
    # docstring; kept in place unchanged.
    """Run an entire Fermi analysis (spectrum) by reading a config file"""
    config = get_config(infile)
    folder = config['out']
    utils.mkdir_p(folder)
    FitRunner,Fit,ListOfAnalysisObjects = GenAnalysisObjects(config)
    # create all the fit files and run gtlike
    FitRunner.PerformFit(Fit)
    sedresult = None
    #plot the SED and model map if possible and asked
    if float(config['UpperLimit']['TSlimit']) < Fit.Ts(config['target']['name']):
        if config['Spectrum']['ResultPlots'] == 'yes':
            from enrico.constants import SpectrumPath
            utils.mkdir_p("%s/%s/" %(config['out'],SpectrumPath))
            sedresult = FitRunner.ComputeSED(Fit,dump=True)
        else:
            sedresult = FitRunner.ComputeSED(Fit,dump=False)
        if (config['energy']['decorrelation_energy'] == 'yes'):
            #Update the energy scale to decorrelation energy
            mes.info('Setting the decorrelation energy as new Scale for the spectral parameters')
            spectrum = Fit[FitRunner.obs.srcname].funcs['Spectrum']
            modeltype = spectrum.genericName()
            genericName = Fit.model.srcs[FitRunner.obs.srcname].spectrum().genericName()
            # Pick the parameter holding the reference/pivot energy for the
            # fitted spectral model; PowerLaw2 has none.
            varscale = None
            if genericName=="PowerLaw2":
                varscale = None
            elif genericName in ["PowerLaw", "PLSuperExpCutoff", "EblAtten::PLSuperExpCutoff"]:
                varscale = "Scale"
            elif genericName in ["LogParabola","EblAtten::LogParabola", \
                                 "BrokenPowerLaw", "EblAtten::BrokenPowerLaw"]:
                varscale = "Eb"
            if varscale is not None:
                try:
                    spectrum.getParam(varscale).setBounds(20,3e6)
                    spectrum.getParam(varscale).setValue(sedresult.decE)
                    # Refit with the pivot moved to the decorrelation energy.
                    FitRunner.PerformFit(Fit)
                except RuntimeError:
                    mes.warning("Error occurred while setting decorrelation energy.")
    #Get and dump the target specific results
    Result = FitRunner.GetAndPrintResults(Fit)
    utils.DumpResult(Result, config)
    # Make energy bins by running a *new* analysis
    Nbin = config['Ebin']['NumEnergyBins']
    if (FitRunner.config['file']['parent_config']==""):
        FitRunner.config['file']['parent_config'] = infile
    if config['Spectrum']['ResultParentPlots'] == "yes":
        print((config['file']['parent_config']))
        plot_sed_fromconfig(config['file']['parent_config'],ignore_missing_bins=True)
    if config['Spectrum']['ResultPlots'] == 'yes' :
        outXml = utils._dump_xml(config)
        # the possibility of making the model map is checked inside the function
        for AnalysisComponent in ListOfAnalysisObjects:
            AnalysisComponent.obs.ModelMap(outXml)
    # NOTE(review): nesting of this tail reconstructed from mangled
    # indentation — confirm against upstream enrico.
    if Nbin>0:
        FitRunner.config['Spectrum']['ResultParentPlots'] = "yes"
        plot_sed_fromconfig(infile,ignore_missing_bins=True)
        energybin.RunEbin(folder,Nbin,Fit,FitRunner,sedresult)
    del(sedresult)
    del(Result)
    del(FitRunner)
# @todo: Should this be a command line utility in bin?
if __name__ == '__main__':
    import sys
    from enrico import Loggin
    mes = Loggin.Message()
    # Bug fix: the old bare 'except:' also swallowed SystemExit and
    # KeyboardInterrupt; only the missing-argument IndexError is expected.
    try:
        infile = sys.argv[1]
    except IndexError:
        print(('Usage: '+sys.argv[0]+' <config file name>'))
        mes.error('Config file not found.')
    run(infile)
| bsd-3-clause |
Zuli/kernel_sony_lt28 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command-name map used to label tasks; pid 0 is the idle task.
threads = { 0 : "idle"}

def thread_name(pid):
    """Return the "comm:pid" label for a recorded task."""
    return "{0}:{1}".format(threads[pid], pid)
class RunqueueEventUnknown:
    """Fallback event when the cause of a runqueue change is not known."""

    @staticmethod
    def color():
        # No dedicated color: caller falls back to the default shading.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """A task on the runqueue went to sleep."""

    @staticmethod
    def color():
        return (0, 0, 0xff)  # blue

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "{0} gone to sleep".format(thread_name(self.sleeper))
class RunqueueEventWakeup:
    """A sleeping task woke up on this runqueue."""

    @staticmethod
    def color():
        return (0xff, 0xff, 0)  # yellow

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "{0} woke up".format(thread_name(self.wakee))
class RunqueueEventFork:
    """A freshly forked task hit this runqueue for the first time."""

    @staticmethod
    def color():
        return (0, 0xff, 0)  # green

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task {0}".format(thread_name(self.child))
class RunqueueMigrateIn:
    """A task was migrated onto this runqueue from another cpu."""

    @staticmethod
    def color():
        return (0, 0xf0, 0xff)  # cyan

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in {0}".format(thread_name(self.new))
class RunqueueMigrateOut:
    """A task was migrated off this runqueue to another cpu."""

    @staticmethod
    def color():
        return (0xff, 0, 0xff)  # magenta

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out {0}".format(thread_name(self.old))
class RunqueueSnapshot:
    """Snapshot of one CPU runqueue: the task pids on it plus the event that
    produced this state. Mutating operations return a new snapshot."""

    def __init__(self, tasks = (0,), event = RunqueueEventUnknown()):
        # Bug fix: the default was a mutable list ([0]); use a tuple. The
        # value is converted to a tuple anyway, so behavior is unchanged.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Apply a sched_switch(prev -> next); returns self if nothing changed."""
        event = RunqueueEventUnknown()
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self
        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)
        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)
        if next not in next_tasks:
            next_tasks.append(next)
        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Drop a task that migrated to another cpu."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            # Task already present: just relabel the snapshot's event.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        # Bug fix: the original appended self.origin_tostring(), a method
        # that does not exist anywhere, so repr() always raised
        # AttributeError. Describe the originating event instead.
        ret += self.event.__repr__()
        return ret
class TimeSlice:
    """State of all runqueues between two consecutive scheduling events.

    Slices form a chain: each one is created from its predecessor and
    inherits (copies) its runqueue map and total load.
    """
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            # First slice of the chain: empty runqueues, no load.
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load consistent when one runqueue is replaced.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        # Apply a context switch on `cpu`; append self to ts_list only if the
        # runqueue actually changed.
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        # Move task `new` from old_cpu's runqueue to new_cpu's.
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)
        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)
        ts_list.append(self)
        # Only flag the source cpu if the task was really removed from it.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        # fork != 0 means this is a brand-new task's first wakeup.
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)
        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and start the successor.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Time-ordered list of TimeSlices with binary-search lookup and the
    rectangle-drawing callbacks used by the wx GUI (SchedGui)."""
    def __init__(self, arg = []):
        # NOTE(review): mutable default argument — every no-arg instance
        # shares one list object. Only a single instance is created in this
        # script, so the bug is latent, but it is worth fixing upstream.
        self.data = arg

    def get_time_slice(self, ts):
        # Open a new slice at ts, chained onto the last one (or onto a fresh
        # sentinel chain when the list is still empty).
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        # Binary-search the index of the slice covering timestamp ts;
        # returns -1 when no slice contains it.
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False
            # NOTE(review): Python 2 integer division ('/'); this script
            # targets perf's py2 bindings — would need '//' under Python 3.
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue
            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i
        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        # GUI callback: show a textual summary of the runqueue under a click.
        idx = self.find_time_slice(t)
        if idx == -1:
            return
        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)
        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        # Shade the rectangle by relative load (white = idle, red = loaded);
        # the top stripe gets the color of the event that created the slice.
        rq = slice.rqs[cpu]
        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0
        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)
        top_color = None
        if cpu in slice.event_cpus:
            top_color = rq.event.color()
        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        # Redraw every slice overlapping [start, end].
        i = self.find_time_slice(start)
        if i == -1:
            return
        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return
            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        # Overall (first start, last end) time span of the recorded data.
        if len(self.data) == 0:
            return (0, 0)
        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest cpu number present in the final slice (GUI y-extent).
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw sched trace events and folds them into a TimeSliceList."""
    def __init__(self):
        # Last pid seen running on each cpu; -1 means "not yet known".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # Python 2 print statement (this script targets perf's py2
            # bindings). The event is reported but still processed below.
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
        # Track comm names and the task now running on this cpu.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        # Record a cross-cpu migration in the current time slice.
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        # Failed wakeups carry no runqueue change.
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    # perf calls this once before the first event: set up the global proxy
    # that accumulates all scheduling state.
    global parser
    parser = SchedEventProxy()
def trace_end():
    # perf calls this after the last event: hand the accumulated timeslices
    # to the wxPython GUI (RootFrame comes from SchedGui) and block in the
    # event loop.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
#
# perf script entry points: perf calls each handler below by event name with
# the event's fields as arguments, so the signatures must stay exactly as
# generated (by "perf script -g python"). Events that do not affect the
# migration view are deliberate no-ops.
#
def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    # A task moved between cpus: record it in the global proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    # Context switch: delegate runqueue bookkeeping to the proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    # fork=1: a newly forked task hit a runqueue for the first time.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    # fork=0: a previously sleeping task woke up.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    # Catch-all for events with no dedicated handler: ignored.
    pass
| gpl-2.0 |
sysalexis/kbengine | kbe/src/lib/python/Lib/test/test_descrtut.py | 119 | 11778 | # This contains most of the executable examples from Guido's descr
# tutorial, once at
#
# http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.
from test.support import sortdict
import pprint
class defaultdict(dict):
    """dict subclass that yields a caller-supplied default for missing keys.

    This is the descriptor-tutorial demo class; its observable behavior is
    exercised verbatim by the doctests in test_1 and must not change.
    """

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        # Missing keys fall back to the configured default instead of
        # raising KeyError.
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        return self.default

    def get(self, key, *args):
        # With no explicit fallback, substitute the instance default.
        return dict.get(self, key, *(args or (self.default,)))

    def merge(self, other):
        # Copy entries from `other` without overwriting existing keys.
        for key in other:
            self.setdefault(key, other[key])
test_1 = """
Here's the new type at work:
>>> print(defaultdict) # show our type
<class 'test.test_descrtut.defaultdict'>
>>> print(type(defaultdict)) # its metatype
<class 'type'>
>>> a = defaultdict(default=0.0) # create an instance
>>> print(a) # show the instance
{}
>>> print(type(a)) # show its type
<class 'test.test_descrtut.defaultdict'>
>>> print(a.__class__) # show its class
<class 'test.test_descrtut.defaultdict'>
>>> print(type(a) is a.__class__) # its type is its class
True
>>> a[1] = 3.25 # modify the instance
>>> print(a) # show the new value
{1: 3.25}
>>> print(a[1]) # show the new item
3.25
>>> print(a[0]) # a non-existent item
0.0
>>> a.merge({1:100, 2:200}) # use a dict method
>>> print(sortdict(a)) # show the result
{1: 3.25, 2: 200}
>>>
We can also use the new type in contexts where classic only allows "real"
dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():
>>> print(sorted(a.keys()))
[1, 2]
>>> a['print'] = print # need the print function here
>>> exec("x = 3; print(x)", a)
3
>>> print(sorted(a.keys(), key=lambda x: (str(type(x)), x)))
[1, 2, '__builtins__', 'print', 'x']
>>> print(a['x'])
3
>>>
Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:
>>> a.default = -1
>>> print(a["noway"])
-1
>>> a.default = -1000
>>> print(a["noway"])
-1000
>>> 'default' in dir(a)
True
>>> a.x1 = 100
>>> a.x2 = 200
>>> print(a.x1)
100
>>> d = dir(a)
>>> 'default' in d and 'x1' in d and 'x2' in d
True
>>> print(sortdict(a.__dict__))
{'default': -1000, 'x1': 100, 'x2': 200}
>>>
"""
class defaultdict2(dict):
    """Same contract as defaultdict, but with __slots__.

    Because only 'default' is slotted and there is no __dict__, assigning
    any other attribute raises AttributeError — which is exactly what the
    test_2 doctests exercise.
    """

    __slots__ = ['default']

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        # Missing keys fall back to the slotted default value.
        return dict.get(self, key, self.default)

    def get(self, key, *args):
        # Caller-supplied default takes precedence over self.default.
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)

    def merge(self, other):
        # Add only the keys we do not already have.
        for k in other:
            if k not in self:
                self[k] = other[k]
test_2 = """
The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:
>>> a = defaultdict2(default=0.0)
>>> a[1]
0.0
>>> a.default = -1
>>> a[1]
-1
>>> a.x1 = 1
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'defaultdict2' object has no attribute 'x1'
>>>
"""
test_3 = """
Introspecting instances of built-in types
For instance of built-in types, x.__class__ is now the same as type(x):
>>> type([])
<class 'list'>
>>> [].__class__
<class 'list'>
>>> list
<class 'list'>
>>> isinstance([], list)
True
>>> isinstance([], dict)
False
>>> isinstance([], object)
True
>>>
You can get the information from the list type:
>>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted
['__add__',
'__class__',
'__contains__',
'__delattr__',
'__delitem__',
'__dir__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getitem__',
'__gt__',
'__hash__',
'__iadd__',
'__imul__',
'__init__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__mul__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__reversed__',
'__rmul__',
'__setattr__',
'__setitem__',
'__sizeof__',
'__str__',
'__subclasshook__',
'append',
'clear',
'copy',
'count',
'extend',
'index',
'insert',
'pop',
'remove',
'reverse',
'sort']
The new introspection API gives more information than the old one: in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:
>>> a = ['tic', 'tac']
>>> list.__len__(a) # same as len(a)
2
>>> a.__len__() # ditto
2
>>> list.append(a, 'toe') # same as a.append('toe')
>>> a
['tic', 'tac', 'toe']
>>>
This is just like it is for user-defined classes.
"""
test_4 = """
Static methods and class methods
The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:
>>> class C:
...
... @staticmethod
... def foo(x, y):
... print("staticmethod", x, y)
>>> C.foo(1, 2)
staticmethod 1 2
>>> c = C()
>>> c.foo(1, 2)
staticmethod 1 2
Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.
>>> class C:
... @classmethod
... def foo(cls, y):
... print("classmethod", cls, y)
>>> C.foo(1)
classmethod <class 'test.test_descrtut.C'> 1
>>> c = C()
>>> c.foo(1)
classmethod <class 'test.test_descrtut.C'> 1
>>> class D(C):
... pass
>>> D.foo(1)
classmethod <class 'test.test_descrtut.D'> 1
>>> d = D()
>>> d.foo(1)
classmethod <class 'test.test_descrtut.D'> 1
This prints "classmethod __main__.D 1" both times; in other words, the
class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().
But notice this:
>>> class E(C):
... @classmethod
... def foo(cls, y): # override C.foo
... print("E.foo() called")
... C.foo(y)
>>> E.foo(1)
E.foo() called
classmethod <class 'test.test_descrtut.C'> 1
>>> e = E()
>>> e.foo(1)
E.foo() called
classmethod <class 'test.test_descrtut.C'> 1
In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""
test_5 = """
Attributes defined by get/set methods
>>> class property(object):
...
... def __init__(self, get, set=None):
... self.__get = get
... self.__set = set
...
... def __get__(self, inst, type=None):
... return self.__get(inst)
...
... def __set__(self, inst, value):
... if self.__set is None:
... raise AttributeError("this attribute is read-only")
... return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
getx() and setx():
>>> class C(object):
...
... def __init__(self):
... self.__x = 0
...
... def getx(self):
... return self.__x
...
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
...
... x = property(getx, setx)
Here's a small demonstration:
>>> a = C()
>>> a.x = 10
>>> print(a.x)
10
>>> a.x = -10
>>> print(a.x)
0
>>>
Hmm -- property is builtin now, so let's try it that way too.
>>> del property # unmask the builtin
>>> property
<class 'property'>
>>> class C(object):
... def __init__(self):
... self.__x = 0
... def getx(self):
... return self.__x
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
... x = property(getx, setx)
>>> a = C()
>>> a.x = 10
>>> print(a.x)
10
>>> a.x = -10
>>> print(a.x)
0
>>>
"""
test_6 = """
Method resolution order
This example is implicit in the writeup.
>>> class A: # implicit new-style class
... def save(self):
... print("called A.save()")
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print("called C.save()")
>>> class D(B, C):
... pass
>>> D().save()
called C.save()
>>> class A(object): # explicit new-style class
... def save(self):
... print("called A.save()")
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print("called C.save()")
>>> class D(B, C):
... pass
>>> D().save()
called C.save()
"""
class A(object):
    """Root of the cooperative-super diamond used by test_7."""
    def m(self):
        return "A"

class B(A):
    """Left branch: tags "B" then continues along the MRO."""
    def m(self):
        tag = "B"
        return tag + super(B, self).m()

class C(A):
    """Right branch: tags "C" then continues along the MRO."""
    def m(self):
        tag = "C"
        return tag + super(C, self).m()

class D(C, B):
    """Diamond bottom: MRO is D, C, B, A, so m() yields "DCBA"."""
    def m(self):
        tag = "D"
        return tag + super(D, self).m()
test_7 = """
Cooperative methods and "super"
>>> print(D().m()) # "DCBA"
DCBA
"""
test_8 = """
Backwards incompatibilities
>>> class A:
... def foo(self):
... print("called A.foo()")
>>> class B(A):
... pass
>>> class C(A):
... def foo(self):
... B.foo(self)
>>> C().foo()
called A.foo()
>>> class C(A):
... def foo(self):
... A.foo(self)
>>> C().foo()
called A.foo()
"""
# Map doctest names to their source strings; regrtest's doctest driver
# picks this mapping up via the __test__ protocol.
__test__ = dict(("tut%d" % i, t) for i, t in
                enumerate((test_1, test_2, test_3, test_4,
                           test_5, test_6, test_7, test_8), 1))
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Run all doctests in this module through regrtest's doctest driver."""
    # Obscure: import this module as test.test_descrtut instead of as
    # plain test_descrtut because the name of this module works its way
    # into the doctest examples, and unless the full test.test_descrtut
    # business is used the name can change depending on how the test is
    # invoked.
    from test import support, test_descrtut
    support.run_doctest(test_descrtut, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    # The argument 1 requests verbose doctest output for direct runs.
    test_main(1)
| lgpl-3.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/docutils-0.7-py2.7.egg/docutils/io.py | 48 | 14403 | # $Id: io.py 6269 2010-03-18 22:27:53Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
will exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
try:
import locale
except:
pass
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
class Input(TransformSpec):
    """
    Abstract base class for input wrappers.
    """
    # NOTE(review): this module is Python 2 code throughout (``unicode``
    # builtin, ``except X, e`` syntax, ``print >>``); it predates the
    # docutils Python 3 port.
    component_type = 'input'
    default_source_path = None
    def __init__(self, source=None, source_path=None, encoding=None,
                 error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the input source."""
        self.error_handler = error_handler
        """Text decoding error handler."""
        self.source = source
        """The source of input data."""
        self.source_path = source_path
        """A text reference to the source."""
        if not source_path:
            self.source_path = self.default_source_path
        self.successful_encoding = None
        """The encoding that successfully decoded the source data."""
    def __repr__(self):
        return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
                                                  self.source_path)
    def read(self):
        # Concrete subclasses must return the decoded input data.
        raise NotImplementedError
    def decode(self, data):
        """
        Decode a string, `data`, heuristically.
        Raise UnicodeError if unsuccessful.
        The client application should call ``locale.setlocale`` at the
        beginning of processing::
            locale.setlocale(locale.LC_ALL, '')
        """
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, unicode), (
                'input encoding is "unicode" '
                'but input is not a unicode object')
        if isinstance(data, unicode):
            # Accept unicode even if self.encoding != 'unicode'.
            return data
        if self.encoding:
            # We believe the user/application when the encoding is
            # explicitly given.
            encodings = [self.encoding]
        else:
            data_encoding = self.determine_encoding_from_data(data)
            if data_encoding:
                # If the data declares its encoding (explicitly or via a BOM),
                # we believe it.
                encodings = [data_encoding]
            else:
                # Apply heuristics only if no encoding is explicitly given and
                # no BOM found. Start with UTF-8, because that only matches
                # data that *IS* UTF-8:
                encodings = ['utf-8']
                # Bare excepts kept as-is: locale state may be unset or
                # broken and any failure must simply skip the candidate.
                try:
                    encodings.append(locale.getlocale()[1])
                except:
                    pass
                try:
                    encodings.append(locale.getdefaultlocale()[1])
                except:
                    pass
                # fallback encoding:
                encodings.append('latin-1')
        error = None
        error_details = ''
        for enc in encodings:
            if not enc:
                continue
            try:
                decoded = unicode(data, enc, self.error_handler)
                self.successful_encoding = enc
                # Return decoded, removing BOMs.
                return decoded.replace(u'\ufeff', u'')
            except (UnicodeError, LookupError), tmperror:
                error = tmperror # working around Python 3 deleting the
                # error variable after the except clause
        if error is not None:
            error_details = '\n(%s: %s)' % (error.__class__.__name__, error)
        raise UnicodeError(
            'Unable to decode input data. Tried the following encodings: '
            '%s.%s'
            % (', '.join([repr(enc) for enc in encodings if enc]),
               error_details))
    coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)"))
    """Encoding declaration pattern."""
    byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # actually 'utf-8-sig'
                        (codecs.BOM_UTF16_BE, 'utf-16-be'),
                        (codecs.BOM_UTF16_LE, 'utf-16-le'),)
    """Sequence of (start_bytes, encoding) tuples for encoding detection.
    The first bytes of input data are checked against the start_bytes strings.
    A match indicates the given encoding."""
    def determine_encoding_from_data(self, data):
        """
        Try to determine the encoding of `data` by looking *in* `data`.
        Check for a byte order mark (BOM) or an encoding declaration.
        """
        # check for a byte order mark:
        for start_bytes, encoding in self.byte_order_marks:
            if data.startswith(start_bytes):
                return encoding
        # check for an encoding declaration pattern in first 2 lines of file:
        for line in data.splitlines()[:2]:
            match = self.coding_slug.search(line)
            if match:
                return match.group(1).decode('ascii')
        return None
class Output(TransformSpec):
    """
    Abstract base class for output wrappers.
    """
    component_type = 'output'
    default_destination_path = None
    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the output destination."""
        self.error_handler = error_handler or 'strict'
        """Text encoding error handler."""
        self.destination = destination
        """The destination for output data."""
        self.destination_path = destination_path
        """A text reference to the destination."""
        if not destination_path:
            self.destination_path = self.default_destination_path
    def __repr__(self):
        return ('%s: destination=%r, destination_path=%r'
                % (self.__class__, self.destination, self.destination_path))
    def write(self, data):
        """`data` is a Unicode string, to be encoded by `self.encode`."""
        # Concrete subclasses implement the actual writing.
        raise NotImplementedError
    def encode(self, data):
        # Encode `data` per self.encoding; pass non-unicode (binary)
        # data through untouched.
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, unicode), (
                'the encoding given is "unicode" but the output is not '
                'a Unicode string')
            return data
        if not isinstance(data, unicode):
            # Non-unicode (e.g. binary) output.
            return data
        else:
            return data.encode(self.encoding, self.error_handler)
class FileInput(Input):
    """
    Input for single, simple file-like objects.
    """
    # NOTE(review): Python 2 code (``except X, e``, ``print >>``); the
    # default 'rU' open mode was removed from open() in Python 3.11.
    def __init__(self, source=None, source_path=None,
                 encoding=None, error_handler='strict',
                 autoclose=1, handle_io_errors=1, mode='rU'):
        """
        :Parameters:
            - `source`: either a file-like object (which is read directly), or
              `None` (which implies `sys.stdin` if no `source_path` given).
            - `source_path`: a path to a file, which is opened and then read.
            - `encoding`: the expected text encoding of the input file.
            - `error_handler`: the encoding error handler to use.
            - `autoclose`: close automatically after read (boolean); always
              false if `sys.stdin` is the source.
            - `handle_io_errors`: summarize I/O errors here, and exit?
            - `mode`: how the file is to be opened (see standard function
              `open`). The default 'rU' provides universal newline support
              for text files.
        """
        Input.__init__(self, source, source_path, encoding, error_handler)
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        if source is None:
            if source_path:
                # Specify encoding in Python 3
                if sys.version_info >= (3,0):
                    kwargs = {'encoding': self.encoding,
                              'errors': self.error_handler}
                else:
                    kwargs = {}
                try:
                    self.source = open(source_path, mode, **kwargs)
                except IOError, error:
                    if not handle_io_errors:
                        raise
                    # Summarize the error and terminate the process; this
                    # is CLI-tool behaviour, not library behaviour.
                    print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
                                                    error)
                    print >>sys.stderr, ('Unable to open source file for '
                                         "reading ('%s'). Exiting." %
                                         source_path)
                    sys.exit(1)
            else:
                # No source of any kind: fall back to standard input and
                # never auto-close it.
                self.source = sys.stdin
                self.autoclose = None
        if not source_path:
            try:
                self.source_path = self.source.name
            except AttributeError:
                pass
    def read(self):
        """
        Read and decode a single file and return the data (Unicode string).
        """
        try:
            data = self.source.read()
        finally:
            if self.autoclose:
                self.close()
        return self.decode(data)
    def readlines(self):
        """
        Return lines of a single file as list of Unicode strings.
        """
        try:
            lines = self.source.readlines()
        finally:
            if self.autoclose:
                self.close()
        return [self.decode(line) for line in lines]
    def close(self):
        # Close the underlying stream unconditionally.
        self.source.close()
class FileOutput(Output):
    """
    Output for single, simple file-like objects.
    """
    # NOTE(review): Python 2 code (``except X, e``, ``print >>``).
    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict', autoclose=1,
                 handle_io_errors=1):
        """
        :Parameters:
            - `destination`: either a file-like object (which is written
              directly) or `None` (which implies `sys.stdout` if no
              `destination_path` given).
            - `destination_path`: a path to a file, which is opened and then
              written.
            - `autoclose`: close automatically after write (boolean); always
              false if `sys.stdout` is the destination.
        """
        Output.__init__(self, destination, destination_path,
                        encoding, error_handler)
        self.opened = 1
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        if destination is None:
            if destination_path:
                # Defer opening the file until the first write().
                self.opened = None
            else:
                self.destination = sys.stdout
                self.autoclose = None
        if not destination_path:
            try:
                self.destination_path = self.destination.name
            except AttributeError:
                pass
    def open(self):
        # Specify encoding in Python 3.
        # (Do not use binary mode ('wb') as this prevents the
        # conversion of newlines to the system specific default.)
        if sys.version_info >= (3,0):
            kwargs = {'encoding': self.encoding,
                      'errors': self.error_handler}
        else:
            kwargs = {}
        try:
            self.destination = open(self.destination_path, 'w', **kwargs)
        except IOError, error:
            if not self.handle_io_errors:
                raise
            # CLI-tool behaviour: report the failure and exit.
            print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
                                            error)
            print >>sys.stderr, ('Unable to open destination file for writing'
                                 " ('%s'). Exiting." % self.destination_path)
            sys.exit(1)
        self.opened = 1
    def write(self, data):
        """Encode `data`, write it to a single file, and return it.
        In Python 3, a (unicode) String is returned.
        """
        if sys.version_info >= (3,0):
            output = data # in py3k, write expects a (Unicode) string
        else:
            output = self.encode(data)
        if not self.opened:
            self.open()
        try:
            self.destination.write(output)
        finally:
            if self.autoclose:
                self.close()
        return output
    def close(self):
        # Clearing `opened` lets a subsequent write() reopen the file.
        self.destination.close()
        self.opened = None
class BinaryFileOutput(FileOutput):
    """
    A version of docutils.io.FileOutput which writes to a binary file.
    """
    def open(self):
        # 'wb' keeps the byte stream exact: no encoding, no newline
        # translation (unlike the text-mode FileOutput.open).
        try:
            self.destination = open(self.destination_path, 'wb')
        except IOError, error:
            if not self.handle_io_errors:
                raise
            print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
                                            error)
            print >>sys.stderr, ('Unable to open destination file for writing '
                                 "('%s'). Exiting." % self.destination_path)
            sys.exit(1)
        self.opened = 1
class StringInput(Input):
    """
    Direct string input.
    """
    # Pseudo-path reported when no real file backs the input.
    default_source_path = '<string>'
    def read(self):
        """Decode and return the source string."""
        return self.decode(self.source)
class StringOutput(Output):
    """
    Direct string output.
    """
    default_destination_path = '<string>'
    def write(self, data):
        """Encode `data`, store it in `self.destination`, and return it."""
        # The encoded result is kept on the instance so callers can
        # retrieve it after processing finishes.
        self.destination = self.encode(data)
        return self.destination
class NullInput(Input):
    """
    Degenerate input: read nothing.
    """
    default_source_path = 'null input'
    def read(self):
        """Return a null string."""
        # Always the empty unicode string, regardless of `source`.
        return u''
class NullOutput(Output):
    """
    Degenerate output: write nothing.
    """
    default_destination_path = 'null output'
    def write(self, data):
        """Do nothing ([don't even] send data to the bit bucket)."""
        pass
class DocTreeInput(Input):
    """
    Adapter for document tree input.
    The document tree must be passed in the ``source`` parameter.
    """
    default_source_path = 'doctree input'
    def read(self):
        """Return the document tree."""
        # No decoding step: `source` is already a parsed tree object.
        return self.source
| gpl-3.0 |
alimuldal/numpy | numpy/core/tests/test_regression.py | 4 | 80281 | from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import warnings
import tempfile
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, IS_PYPY,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec, suppress_warnings
)
from numpy.testing.utils import _assert_valid_refcount, HAS_REFCOUNT
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self, level=rlevel):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self, level=rlevel):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self, level=rlevel):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self, level=rlevel):
# Ticket #31
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self, level=rlevel):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self, level=rlevel):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self, level=rlevel):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self, level=rlevel):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self, level=rlevel):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self, level=rlevel):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self, level=rlevel):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self, level=rlevel):
# Ticket #50
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self, level=rlevel):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self, level=rlevel):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self, level=rlevel):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self, level=rlevel):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self, level=rlevel):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self, level=rlevel):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with suppress_warnings() as sup:
sup.filter(FutureWarning)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self, level=rlevel):
# Ticket #79
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self, level=rlevel):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self, level=rlevel):
# Ticket #93
self.assertRaises(TypeError, np.dtype,
{'names':['a'], 'formats':['foo']}, align=1)
    @dec.knownfailureif((sys.version_info[0] >= 3) or
                        (sys.platform == "win32" and
                         platform.architecture()[0] == "64bit"),
                        "numpy.intp('0xff', 16) not supported on Py3, "
                        "as it does not inherit from Python int")
    def test_intp(self, level=rlevel):
        # Ticket #99
        # Widest hex literal that still fits in intp (2 hex digits per
        # byte, minus one to stay below the sign bit).
        i_width = np.int_(0).nbytes*2 - 1
        np.intp('0x' + 'f'*i_width, 16)
        # One digit wider overflows; base 32 is rejected outright.
        self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
        self.assertRaises(ValueError, np.intp, '0x1', 32)
        assert_equal(255, np.intp('0xFF', 16))
        assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self, level=rlevel):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self, level=rlevel):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self, level=rlevel):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
    def test_mem_dot(self, level=rlevel):
        # Ticket #106
        # dot() of a 0-row operand must not write outside its 0-sized
        # output buffer.
        x = np.random.randn(0, 1)
        y = np.random.randn(10, 1)
        # Dummy array to detect bad memory access:
        _z = np.ones(10)
        _dummy = np.empty((0, 10))
        # z is a 0-row strided view laid over _z's memory: any
        # out-of-bounds write by dot() would clobber the ones in _z.
        z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
        np.dot(x, np.transpose(y), out=z)
        assert_equal(_z, np.ones(10))
        # Do the same for the built-in dot:
        np.core.multiarray.dot(x, np.transpose(y), out=z)
        assert_equal(_z, np.ones(10))
def test_arange_endian(self, level=rlevel):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_argmax(self, level=rlevel):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self, level=rlevel):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self, level=rlevel):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self, level=rlevel):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self, level=rlevel):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self, level=rlevel):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self, level=rlevel):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self, level=rlevel):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self, level=rlevel):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
def test_flat_assignment(self, level=rlevel):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self, level=rlevel):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self, level=rlevel):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self, level=rlevel):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self, level=rlevel):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self, level=rlevel):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self, level=rlevel):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self, level=rlevel):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self, level=rlevel):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self, level=rlevel):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self, level=rlevel):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
    def test_pickle_py2_bytes_encoding(self):
        # Check that arrays and scalars pickled on Py2 are
        # unpickleable on Py3 using encoding='bytes'
        # The raw pickle streams below were captured from a Python 2
        # interpreter; they must stay byte-for-byte exact.
        test_data = [
            # (original, py2_pickle)
            (np.unicode_('\u6f2c'),
             asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
                     "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
                     "I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
            (np.array([9e123], dtype=np.float64),
             asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
                     "p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
                     "p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
                     "I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
            (np.array([(9e123,)], dtype=[('name', float)]),
             asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
                     "(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
                     "(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
                     "(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
                     "I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
                     "bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
        ]
        if sys.version_info[:2] >= (3, 4):
            # encoding='bytes' was added in Py3.4
            for original, data in test_data:
                result = pickle.loads(data, encoding='bytes')
                assert_equal(result, original)
                # Field names of structured results must come back as str,
                # not bytes.
                if isinstance(result, np.ndarray) and result.dtype.names:
                    for name in result.dtype.names:
                        assert_(isinstance(name, str))
def test_pickle_dtype(self, level=rlevel):
    # Ticket #251: pickling the builtin float type (through the
    # np.float alias) must not raise.
    pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
# Ticket #270
self.assertEqual(np.array([1, 'A', None]).shape, (3,))
def test_multiple_assign(self, level=rlevel):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
# Ticket #341
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
    # Ticket #342: ragged nested lists cannot form a rectangular '|S1'
    # array; this must raise ValueError rather than crash.
    self.assertRaises(ValueError,
                      np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
    # Ticket #344: an integer subarray count and the equivalent
    # 1-tuple count must build identical dtypes, including their repr.
    int_count = np.dtype(('uint32', 2))
    tuple_count = np.dtype(('uint32', (2,)))
    assert_equal(repr(int_count), repr(tuple_count))
def test_reshape_order(self, level=rlevel):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
    # GitHub Issue #2700: assigning a shape containing -1 to a 0-sized
    # array used to fail; the -1 must resolve (to zero rows) instead.
    degenerate = np.ones((0, 2))
    degenerate.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self, level=rlevel):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
# Convolve should raise an error for empty input array.
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
# Ticket #483
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
if HAS_REFCOUNT:
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
if HAS_REFCOUNT:
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self, level=rlevel):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self, level=rlevel):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self, level=rlevel):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
    "Ticket #583"
    # fromiter over a sequence of lists with a str dtype must raise
    # ValueError cleanly, not corrupt memory.
    self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
# After removing deprecation, the following are ValueErrors.
# This might seem odd as compared to the value error below. This
# is due to the fact that the new code always uses "nonzero" logic
# and the boolean special case is not taken.
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
sup.filter(FutureWarning)
sup.filter(np.VisibleDeprecationWarning)
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
    # binary_repr of zero must honor the requested width (zero-pad).
    assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
# Check argsort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
# Check sort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel):
    # Constructing uint64 from a negative int must wrap modulo 2**64.
    assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:, :] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self, level=rlevel):
# Ticket 702
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self, level=rlevel):
# Ticket #711
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
    # Ticket #714: indexing with a 0-d integer array must not corrupt
    # memory.
    np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
    # Ticket #788, changeset r5155
    # The test data file was generated by scipy.io.savemat.
    # The dtype is float64, but the isbuiltin attribute is 0.
    data_dir = path.join(path.dirname(__file__), 'data')
    filename = path.join(data_dir, "astype_copy.pkl")
    # Use context managers so the file handle is closed even if
    # pickle.load raises (the originals leaked the handle on error).
    if sys.version_info[0] >= 3:
        with open(filename, 'rb') as f:
            xp = pickle.load(f, encoding='latin1')
    else:
        with open(filename) as f:
            xp = pickle.load(f)
    # astype to the same (non-builtin-flagged) float64 must still copy:
    # the two arrays may not share their data buffer.
    xpd = xp.astype(np.float64)
    assert_((xp.__array_interface__['data'][0] !=
             xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
    # Ticket #789, changeset 5217.
    # compress with an out= argument segfaulted when the result could
    # not be cast safely; it must raise TypeError instead of returning
    # (or crashing). Uses assert_raises for consistency with the other
    # tests in this file; the redundant local `import numpy as np` is
    # gone — np is already a module-level import.
    a = np.array([[1, 2], [3, 4]])
    b = np.zeros((2, 1), dtype=np.single)
    assert_raises(TypeError, a.compress, [True, False], axis=1, out=b)
def test_attributes(self, level=rlevel):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self, level=rlevel):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
# Make sure that .item() fails graciously when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
    "Ticket #882"
    # choose with an empty choice list must raise ValueError, not
    # crash.
    a = np.array(1)
    self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
    "Ticket #947."
    # ndmin=33 exceeds the supported number of dimensions (32 at the
    # time) and must raise ValueError rather than overflow internally.
    self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_void_scalar_with_titles(self, level=rlevel):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
x.resize((m, 0), refcheck=False)
else:
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
y.resize((0, n), refcheck=False)
else:
y.resize((0, n))
# `dot` should just return zero (m, n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
    # Regression test for #1061.
    # Set a size which cannot fit into a 64 bits signed integer
    sz = 2 ** 64
    # The exact error message is part of the contract being checked.
    good = 'Maximum allowed dimension exceeded'
    try:
        np.empty(sz)
    except ValueError as e:
        if not str(e) == good:
            self.fail("Got msg '%s', expected '%s'" % (e, good))
    except Exception as e:
        self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
    # Regression test for #1062.
    # A length which cannot fit into a 64 bit signed integer must make
    # arange raise ValueError with the expected message.
    sz = 2 ** 64
    good = 'Maximum allowed size exceeded'
    try:
        a = np.arange(sz)
        # Bug fix: the original asserted `np.size == sz`, comparing the
        # np.size *function object* to sz — always False. If arange
        # ever succeeded, check the size of the array it produced.
        self.assertTrue(a.size == sz)
    except ValueError as e:
        if not str(e) == good:
            self.fail("Got msg '%s', expected '%s'" % (e, good))
    except Exception as e:
        self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
    # Ticket #1058: fromiter must accept the signed ('b') and unsigned
    # ('B') single-byte dtypes. (test_fromiter_comparison later in this
    # file repeats the same check.)
    a = np.fromiter(list(range(10)), dtype='b')
    b = np.fromiter(list(range(10)), dtype='B')
    assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
    assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]],
dtype='U')
self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
    # Ticket #1254: a field title that duplicates another field's name
    # must be rejected with ValueError.
    dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
    self.assertRaises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
    # An ndarray must expose a buffer that md5 can hash directly, and
    # the digest of this fixed little-endian data is pinned.
    try:
        from hashlib import md5
    except ImportError:
        # Pre-hashlib Python fallback (dead on modern interpreters).
        from md5 import new as md5
    x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
    assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
    # Bug #1436: asarray of a 0-d string with a byte-swapped character
    # dtype must succeed.
    np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
    # Duplicate of test_fromiter_bytes (ticket #1058): fromiter with
    # the signed ('b') and unsigned ('B') single-byte dtypes.
    a = np.fromiter(list(range(10)), dtype='b')
    b = np.fromiter(list(range(10)), dtype='B')
    assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
    assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], np.bool_) # not x[0] because it is unordered
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# Test the same for a circular reference.
b = np.array(a, dtype=object)
a[()] = b
assert_raises(TypeError, int, a)
# NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = 0
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
if HAS_REFCOUNT:
assert_(sys.getrefcount(a[()]) == 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
# Decoding under non-latin1 encoding (e.g.) KOI8-R can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
# but are different in koi8-r, resulting to silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
# Unicode code points outside latin1, so results
# to an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for NumPy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
[sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
self.assertTrue(arr is not arr_cp)
self.assertTrue(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
a = {'a': 1}
b = {'b': 2}
arr = np.array([[a, b], [a, b]], order='F')
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
self.assertTrue(arr is not arr_cp)
# Ensure that we have actually copied the item.
self.assertTrue(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
self.assertTrue(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
self.assertTrue(arr_cp[0, 1] is arr_cp[1, 1])
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo(object):
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
else:
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1, 2, 3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
import pickle
test_string = np.string_('')
assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (np.str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
def test_reshape_size_overflow(self):
# gh-7455
a = np.ones(20)[::2]
if np.dtype(np.intp).itemsize == 8:
# 64 bit. The following are the prime factors of 2**63 + 5,
# plus a leading 2, so when multiplied together as int64,
# the result overflows to a total size of 10.
new_shape = (2, 13, 419, 691, 823, 2977518503)
else:
# 32 bit. The following are the prime factors of 2**31 + 5,
# plus a leading 2, so when multiplied together as int32,
# the result overflows to a total size of 10.
new_shape = (2, 7, 7, 43826197)
assert_raises(ValueError, a.reshape, new_shape)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    run_module_suite()
| bsd-3-clause |
vine/luigi | test/server_test.py | 13 | 3810 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import multiprocessing
import random
import signal
import time
import tempfile
from helpers import unittest, with_config
import luigi.rpc
import luigi.server
from luigi.scheduler import CentralPlannerScheduler
from tornado.testing import AsyncHTTPTestCase
class ServerTestBase(AsyncHTTPTestCase):
    """Base class for scheduler-server tests.

    Patches ``RemoteScheduler._fetch`` so RPC requests are routed through
    Tornado's in-process test HTTP client instead of a real socket, and
    restores the original transport on teardown.
    """

    def get_app(self):
        # Serve a fresh scheduler instance for every test case.
        return luigi.server.app(CentralPlannerScheduler())

    def setUp(self):
        super(ServerTestBase, self).setUp()

        self._old_fetch = luigi.rpc.RemoteScheduler._fetch

        def _fetch(obj, url, body, *args, **kwargs):
            # Route the RPC POST through the AsyncHTTPTestCase client;
            # surface HTTP errors the same way the real transport would.
            response = self.fetch(url, body=body, method='POST')
            if response.code >= 400:
                # Fixed typo in the error message ("Errror" -> "Error").
                raise luigi.rpc.RPCError(
                    'Error when connecting to remote scheduler'
                )
            return response.body.decode('utf-8')

        luigi.rpc.RemoteScheduler._fetch = _fetch

    def tearDown(self):
        # Restore the original RPC transport so other tests are unaffected.
        super(ServerTestBase, self).tearDown()
        luigi.rpc.RemoteScheduler._fetch = self._old_fetch
class ServerTest(ServerTestBase):
    """Smoke tests against the HTTP endpoints of the scheduler server."""

    def test_visualizer(self):
        # The index page should contain the visualizer HTML title tag.
        page_bytes = self.fetch('/').body
        self.assertTrue(page_bytes.find(b'<title>') != -1)

    def _test_404(self, path):
        # Shared helper: fetching an unknown path must return HTTP 404.
        response = self.fetch(path)
        self.assertEqual(response.code, 404)

    def test_404(self):
        self._test_404('/foo')

    def test_api_404(self):
        self._test_404('/api/foo')
class ServerTestRun(unittest.TestCase):
    """Test to start and stop the server in a more "standard" way:
    a real server subprocess is launched and talked to over RPC.
    """

    def run_server(self):
        # Runs in the child process: serve the API on the chosen port.
        luigi.server.run(api_port=self._api_port, address='127.0.0.1')

    def start_server(self):
        # NOTE(review): a randomly chosen port may already be in use,
        # which would make the child fail to bind -- confirm that this
        # flakiness is acceptable for the test suite.
        self._api_port = random.randint(1024, 9999)
        self._process = multiprocessing.Process(target=self.run_server)
        self._process.start()
        time.sleep(0.1)  # wait for server to start (fixed delay; racy on slow machines)
        self.sch = luigi.rpc.RemoteScheduler(host='localhost', port=self._api_port)
        # Disable the client-side retry sleep so failures surface fast.
        self.sch._wait = lambda: None

    def stop_server(self):
        # Terminate the child; if it does not exit within a second,
        # force-kill it so the test run cannot hang.
        self._process.terminate()
        self._process.join(1)
        if self._process.is_alive():
            os.kill(self._process.pid, signal.SIGKILL)

    def setUp(self):
        # NOTE(review): tempfile.mktemp is race-prone/deprecated; here the
        # path is only handed to the scheduler as its state-file location,
        # so presumably the scheduler creates it itself -- verify.
        state_path = tempfile.mktemp(suffix=self.id())
        luigi.configuration.get_config().set('scheduler', 'state_path', state_path)
        self.start_server()

    def tearDown(self):
        self.stop_server()

    def test_ping(self):
        self.sch.ping(worker='xyz')

    def test_raw_ping(self):
        self.sch._request('/api/ping', {'worker': 'xyz'})

    def test_raw_ping_extended(self):
        # Extra unknown keys in the payload must be tolerated by the API.
        self.sch._request('/api/ping', {'worker': 'xyz', 'foo': 'bar'})

    def test_404(self):
        # An unknown API endpoint must surface as an RPCError client-side.
        with self.assertRaises(luigi.rpc.RPCError):
            self.sch._request('/api/fdsfds', {'dummy': 1})

    def test_save_state(self):
        # Scheduler state must survive a full server restart via the
        # state file configured in setUp().
        self.sch.add_task('X', 'B', deps=('A',))
        self.sch.add_task('X', 'A')
        self.assertEqual(self.sch.get_work('X')['task_id'], 'A')
        self.stop_server()
        self.start_server()
        work = self.sch.get_work('X')['running_tasks'][0]
        self.assertEqual(work['task_id'], 'A')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| apache-2.0 |
petewarden/tensorflow_makefile | tensorflow/python/framework/dtypes.py | 10 | 17305 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
class DType(object):
  """Represents the type of the elements in a `Tensor`.

  The following `DType` objects are defined:

  * `tf.float16`: 16-bit half-precision floating-point.
  * `tf.float32`: 32-bit single-precision floating-point.
  * `tf.float64`: 64-bit double-precision floating-point.
  * `tf.bfloat16`: 16-bit truncated floating-point.
  * `tf.complex64`: 64-bit single-precision complex.
  * `tf.complex128`: 128-bit double-precision complex.
  * `tf.int8`: 8-bit signed integer.
  * `tf.uint8`: 8-bit unsigned integer.
  * `tf.uint16`: 16-bit unsigned integer.
  * `tf.int16`: 16-bit signed integer.
  * `tf.int32`: 32-bit signed integer.
  * `tf.int64`: 64-bit signed integer.
  * `tf.bool`: Boolean.
  * `tf.string`: String.
  * `tf.qint8`: Quantized 8-bit signed integer.
  * `tf.quint8`: Quantized 8-bit unsigned integer.
  * `tf.qint16`: Quantized 16-bit signed integer.
  * `tf.quint16`: Quantized 16-bit unsigned integer.
  * `tf.qint32`: Quantized 32-bit signed integer.

  In addition, variants of these types with the `_ref` suffix are
  defined for reference-typed tensors.

  The `tf.as_dtype()` function converts numpy types and string type
  names to a `DType` object.

  @@is_compatible_with
  @@name
  @@base_dtype
  @@real_dtype
  @@is_ref_dtype
  @@as_ref
  @@is_floating
  @@is_complex
  @@is_integer
  @@is_quantized
  @@is_unsigned
  @@as_numpy_dtype
  @@as_datatype_enum
  """

  def __init__(self, type_enum):
    """Creates a new `DataType`.

    NOTE(mrry): In normal circumstances, you should not need to
    construct a `DataType` object directly. Instead, use the
    `tf.as_dtype()` function.

    Args:
      type_enum: A `types_pb2.DataType` enum value.

    Raises:
      TypeError: If `type_enum` is not a value `types_pb2.DataType`.
    """
    # TODO(mrry): Make the necessary changes (using __new__) to ensure
    # that calling this returns one of the interned values.
    type_enum = int(type_enum)
    if (type_enum not in types_pb2.DataType.values()
        or type_enum == types_pb2.DT_INVALID):
      raise TypeError(
          "type_enum is not a valid types_pb2.DataType: %s" % type_enum)
    self._type_enum = type_enum

  @property
  def is_ref_dtype(self):
    """Returns `True` if this `DType` represents a reference type."""
    # The proto encodes each reference dtype as its base enum value
    # plus 100 (e.g. DT_FLOAT_REF == DT_FLOAT + 100).
    return self._type_enum > 100

  @property
  def as_ref(self):
    """Returns a reference `DType` based on this `DType`."""
    if self.is_ref_dtype:
      return self
    else:
      # Shift into the reference range (+100) and reuse the interned object.
      return _INTERN_TABLE[self._type_enum + 100]

  @property
  def base_dtype(self):
    """Returns a non-reference `DType` based on this `DType`."""
    if self.is_ref_dtype:
      # Shift out of the reference range (-100) back to the base dtype.
      return _INTERN_TABLE[self._type_enum - 100]
    else:
      return self

  @property
  def real_dtype(self):
    """Returns the dtype correspond to this dtype's real part."""
    base = self.base_dtype
    if base == complex64:
      return float32
    elif base == complex128:
      return float64
    else:
      # Non-complex dtypes are their own real part.
      return self

  @property
  def as_numpy_dtype(self):
    """Returns a `numpy.dtype` based on this `DType`."""
    return _TF_TO_NP[self._type_enum]

  @property
  def as_datatype_enum(self):
    """Returns a `types_pb2.DataType` enum value based on this `DType`."""
    return self._type_enum

  @property
  def is_integer(self):
    """Returns whether this is a (non-quantized) integer type."""
    return (not self.is_quantized and
            issubclass(self.as_numpy_dtype, np.integer))

  @property
  def is_floating(self):
    """Returns whether this is a (real) floating point type."""
    return issubclass(self.as_numpy_dtype, np.floating)

  @property
  def is_complex(self):
    """Returns whether this is a complex floating point type."""
    return self.base_dtype in (complex64, complex128)

  @property
  def is_quantized(self):
    """Returns whether this is a quantized data type."""
    # NOTE: bfloat16 is grouped with the quantized types here.
    return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]

  @property
  def is_unsigned(self):
    """Returns whether this type is unsigned.

    Non-numeric, unordered, and quantized types are not considered unsigned, and
    this function returns `False`.

    Returns:
      Whether a `DType` is unsigned.
    """
    # `min` raises TypeError for non-numeric/unordered/quantized types,
    # which this property deliberately maps to False.
    try:
      return self.min == 0
    except TypeError:
      return False

  @property
  def min(self):
    """Returns the minimum representable value in this data type.

    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find minimum value of %s." % self)

    # there is no simple way to get the min value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).min
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).min
      except:
        raise TypeError("Cannot find minimum value of %s." % self)

  @property
  def max(self):
    """Returns the maximum representable value in this data type.

    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find maximum value of %s." % self)

    # there is no simple way to get the min value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).max
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).max
      except:
        raise TypeError("Cannot find maximum value of %s." % self)

  def is_compatible_with(self, other):
    """Returns True if the `other` DType will be converted to this DType.

    The conversion rules are as follows:

    ```
    DType(T)       .is_compatible_with(DType(T))        == True
    DType(T)       .is_compatible_with(DType(T).as_ref) == True
    DType(T).as_ref.is_compatible_with(DType(T))        == False
    DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
    ```

    Args:
      other: A `DType` (or object that may be converted to a `DType`).

    Returns:
      True if a Tensor of the `other` `DType` will be implicitly converted to
      this `DType`.
    """
    other = as_dtype(other)
    # Compatible iff `other` is exactly this dtype, or is the base dtype
    # of this (possibly reference) dtype.
    return self._type_enum in (
        other.as_datatype_enum, other.base_dtype.as_datatype_enum)

  def __eq__(self, other):
    """Returns True iff this DType refers to the same type as `other`."""
    # `other` may be anything convertible by as_dtype (string, numpy type...).
    return (other is not None
            and self._type_enum == as_dtype(other).as_datatype_enum)

  def __ne__(self, other):
    """Returns True iff self != other."""
    return not self.__eq__(other)

  @property
  def name(self):
    """Returns the string name for this `DType`."""
    return _TYPE_TO_STRING[self._type_enum]

  def __str__(self):
    return "<dtype: %r>" % self.name

  def __repr__(self):
    return "tf." + self.name

  def __hash__(self):
    # The interned enum value is a stable hash, consistent with __eq__.
    return self._type_enum

  @property
  def size(self):
    # Size in bytes of one element of this dtype, via numpy.
    return np.dtype(self.as_numpy_dtype).itemsize
# Define standard wrappers for the types_pb2.DataType enum.

# Non-reference (value) dtypes.
float16 = DType(types_pb2.DT_HALF)
half = float16  # alias
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64  # alias
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)  # NOTE: shadows the builtin `bool` in this module
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
bfloat16 = DType(types_pb2.DT_BFLOAT16)

# Reference dtypes (enum value = base + 100), used for variable-typed tensors.
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref  # alias
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref  # alias
int32_ref = DType(types_pb2.DT_INT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
}
_STRING_TO_TF = {value: _INTERN_TABLE[key]
for key, value in _TYPE_TO_STRING.items()}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = frozenset([
(np.float16, float16),
(np.float32, float32),
(np.float64, float64),
(np.int32, int32),
(np.int64, int64),
(np.uint8, uint8),
(np.uint16, uint16),
(np.int16, int16),
(np.int8, int8),
(np.complex64, complex64),
(np.complex128, complex128),
(np.object, string),
(np.bool, bool),
(_np_qint8, qint8),
(_np_quint8, quint8),
(_np_qint16, qint16),
(_np_quint16, quint16),
(_np_qint32, qint32),
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT8: np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_BOOL: np.bool,
types_pb2.DT_QINT8: _np_qint8,
types_pb2.DT_QUINT8: _np_quint8,
types_pb2.DT_QINT16: _np_qint16,
types_pb2.DT_QUINT16: _np_quint16,
types_pb2.DT_QINT32: _np_qint32,
types_pb2.DT_BFLOAT16: np.uint16,
# Ref types
types_pb2.DT_HALF_REF: np.float16,
types_pb2.DT_FLOAT_REF: np.float32,
types_pb2.DT_DOUBLE_REF: np.float64,
types_pb2.DT_INT32_REF: np.int32,
types_pb2.DT_UINT8_REF: np.uint8,
types_pb2.DT_UINT16_REF: np.uint16,
types_pb2.DT_INT16_REF: np.int16,
types_pb2.DT_INT8_REF: np.int8,
types_pb2.DT_STRING_REF: np.object,
types_pb2.DT_COMPLEX64_REF: np.complex64,
types_pb2.DT_COMPLEX128_REF: np.complex128,
types_pb2.DT_INT64_REF: np.int64,
types_pb2.DT_BOOL_REF: np.bool,
types_pb2.DT_QINT8_REF: _np_qint8,
types_pb2.DT_QUINT8_REF: _np_quint8,
types_pb2.DT_QINT16_REF: _np_qint16,
types_pb2.DT_QUINT16_REF: _np_quint16,
types_pb2.DT_QINT32_REF: _np_qint32,
types_pb2.DT_BFLOAT16_REF: np.uint16,
}
QUANTIZED_DTYPES = frozenset(
[qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
quint16_ref, qint32_ref])
def as_dtype(type_value):
  """Converts the given `type_value` to a `DType`.

  Args:
    type_value: A value that can be converted to a `tf.DType`
      object. This may currently be a `tf.DType` object, a
      [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
      a string type name, or a `numpy.dtype`.

  Returns:
    A `DType` corresponding to `type_value`.

  Raises:
    TypeError: If `type_value` cannot be converted to a `DType`.
  """
  if isinstance(type_value, DType):
    return type_value
  # Enum-value lookup, then string-name lookup.  dict.get() raises TypeError
  # for unhashable inputs exactly as the direct indexing did, and None is
  # never a table value, so it is a safe "missing" sentinel.
  interned = _INTERN_TABLE.get(type_value)
  if interned is not None:
    return interned
  named = _STRING_TO_TF.get(type_value)
  if named is not None:
    return named
  if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length. We can not compare
    # dtype with a single constant (np.string does not exist) to decide
    # dtype is a "string" type. We need to compare the dtype.type to be
    # sure it's a string type.
    if type_value.type in (np.string_, np.unicode_):
      return string
    for np_type, tf_type in _NP_TO_TF:
      if np_type == type_value:
        return tf_type
  raise TypeError(
      "Cannot convert value %r to a TensorFlow DType." % type_value)
| apache-2.0 |
younisd/django-basic-apps | basic/relationships/models.py | 8 | 4068 | from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from django.conf import settings
# Cache TTL for relationship lists, in seconds (default: one week),
# overridable via the RELATIONSHIP_CACHE setting.
RELATIONSHIP_CACHE = getattr(settings, 'RELATIONSHIP_CACHE', 60*60*24*7)
# Suffixes used to build per-user cache keys ('user_<pk>_<suffix>').
RELATIONSHIP_CACHE_KEYS = {
    'FRIENDS': 'friends',
    'FOLLOWERS': 'followers',
    'BLOCKERS': 'blockers',
    'FANS': 'fans'
}
class RelationshipManager(models.Manager):
    """Manager providing cached relationship queries (friends, followers,
    blockers, fans) between users."""

    def _set_cache(self, user, user_list, relationship_type, flat=False, flat_attr='to_user'):
        """Cache `user_list` under a per-user key and return it.

        When `flat` is True, the queryset is reduced to a flat list of
        `flat_attr` ids before caching.  NOTE(review): the cache is only
        written here, never read back by these methods -- presumably it is
        consumed elsewhere (e.g. template tags); verify before changing.
        """
        cache_key = 'user_%s_%s' % (user.pk, relationship_type)
        if flat:
            cache_key = cache_key + '_flat'
            user_list = user_list.values_list(flat_attr, flat=True)
        if not cache.get(cache_key):
            cache.set(cache_key, list(user_list), RELATIONSHIP_CACHE)
        return user_list

    def get_blockers_for_user(self, user, flat=False):
        """Returns list of people blocking user."""
        user_list = self.filter(to_user=user, is_blocked=True)
        return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['BLOCKERS'], flat=flat, flat_attr='from_user')

    def get_friends_for_user(self, user, flat=False):
        """Returns people user is following sans people blocking user."""
        blocked_id_list = self.get_blockers_for_user(user, flat=True)
        user_list = self.filter(from_user=user, is_blocked=False).exclude(to_user__in=blocked_id_list)
        return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['FRIENDS'], flat=flat)

    def get_followers_for_user(self, user, flat=False):
        """Returns people following user."""
        user_list = self.filter(to_user=user, is_blocked=False)
        return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['FOLLOWERS'], flat=flat, flat_attr='from_user')

    def get_fans_for_user(self, user, flat=False):
        """Returns people following user but user isn't following."""
        friend_id_list = self.get_friends_for_user(user, flat=True)
        user_list = self.filter(to_user=user, is_blocked=False).exclude(from_user__in=friend_id_list)
        return self._set_cache(user, user_list, RELATIONSHIP_CACHE_KEYS['FANS'], flat=flat, flat_attr='from_user')

    def get_relationship(self, from_user, to_user):
        """Return the relationship from `from_user` to `to_user`, or None.

        Previously used a bare `except:` that silently swallowed *any*
        error (including database faults); narrowed to the expected
        "no such row" case.  unique_together on the model rules out
        MultipleObjectsReturned.
        """
        try:
            return self.get(from_user=from_user, to_user=to_user)
        except self.model.DoesNotExist:
            return None

    def blocking(self, from_user, to_user):
        """Returns True if from_user is blocking to_user."""
        relationship = self.get_relationship(from_user, to_user)
        if relationship is None:
            return False
        return bool(relationship.is_blocked)
class Relationship(models.Model):
    """Directed follow/block edge from `from_user` to `to_user`."""
    from_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='from_users')
    to_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='to_users')
    created = models.DateTimeField(auto_now_add=True)
    is_blocked = models.BooleanField(default=False)
    objects = RelationshipManager()

    class Meta:
        # At most one edge per (from_user, to_user) pair.
        unique_together = (('from_user', 'to_user'),)
        verbose_name = _('relationship')
        verbose_name_plural = _('relationships')
        db_table = 'relationships'

    def __unicode__(self):
        if self.is_blocked:
            return u'%s is blocking %s' % (self.from_user, self.to_user)
        return u'%s is connected to %s' % (self.from_user, self.to_user)

    def save(self, *args, **kwargs):
        # Invalidate cached relationship lists before persisting.
        self._delete_cache_keys()
        super(Relationship, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        self._delete_cache_keys()
        super(Relationship, self).delete(*args, **kwargs)

    def _delete_cache_keys(self):
        # Drops every cached list (flat and non-flat) for from_user.
        # NOTE(review): only from_user's caches are cleared; to_user's
        # FOLLOWERS/FANS/BLOCKERS caches look like they would go stale --
        # confirm whether that is intentional.
        for key in RELATIONSHIP_CACHE_KEYS:
            cache.delete('user_%s_%s' % (self.from_user.pk, RELATIONSHIP_CACHE_KEYS[key]))
            cache.delete('user_%s_%s_flat' % (self.from_user.pk, RELATIONSHIP_CACHE_KEYS[key]))
efortuna/AndroidSDKClone | ndk/prebuilt/linux-x86_64/lib/python2.7/test/test_xrange.py | 93 | 7825 | # Python test set -- built-in functions
import test.test_support, unittest
import sys
import pickle
import itertools
import warnings
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python reference generator matching xrange(start, stop, step)."""
    # Normalize `stop` so the exact-equality loop test below terminates:
    # replace it with the next member of the arithmetic progression
    # start, start+step, ... at or beyond the requested stop.
    if (start - stop) // step < 0:
        stop += (start - stop) % step
    current = start
    while current != stop:
        yield current
        current += step
def pyrange_reversed(start, stop, step):
    """Reference generator matching reversed(xrange(start, stop, step))."""
    # Walk the same normalized progression as pyrange, from the far end back.
    aligned_stop = stop + (start - stop) % step
    return pyrange(aligned_stop - step, start - step, -step)
class XrangeTest(unittest.TestCase):
    """Tests for the builtin xrange type.

    Python 2 only: relies on xrange, sys.maxint, long literals, and
    itertools.izip_longest.
    """
    def assert_iterators_equal(self, xs, ys, test_id, limit=None):
        # check that an iterator xs matches the expected results ys,
        # up to a given limit.
        if limit is not None:
            xs = itertools.islice(xs, limit)
            ys = itertools.islice(ys, limit)
        sentinel = object()
        pairs = itertools.izip_longest(xs, ys, fillvalue=sentinel)
        for i, (x, y) in enumerate(pairs):
            if x == y:
                continue
            elif x == sentinel:
                self.fail('{}: iterator ended unexpectedly '
                          'at position {}; expected {}'.format(test_id, i, y))
            elif y == sentinel:
                self.fail('{}: unexpected excess element {} at '
                          'position {}'.format(test_id, x, i))
            else:
                self.fail('{}: wrong element at position {};'
                          'expected {}, got {}'.format(test_id, i, y, x))
    def assert_xranges_equivalent(self, x, y):
        # Check that two xrange objects are equivalent, in the sense of the
        # associated sequences being the same. We want to use this for large
        # xrange objects, so instead of converting to lists and comparing
        # directly we do a number of indirect checks.
        if len(x) != len(y):
            self.fail('{} and {} have different '
                      'lengths: {} and {} '.format(x, y, len(x), len(y)))
        if len(x) >= 1:
            if x[0] != y[0]:
                self.fail('{} and {} have different initial '
                          'elements: {} and {} '.format(x, y, x[0], y[0]))
            if x[-1] != y[-1]:
                self.fail('{} and {} have different final '
                          'elements: {} and {} '.format(x, y, x[-1], y[-1]))
        if len(x) >= 2:
            x_step = x[1] - x[0]
            y_step = y[1] - y[0]
            if x_step != y_step:
                self.fail('{} and {} have different step: '
                          '{} and {} '.format(x, y, x_step, y_step))
    def test_xrange(self):
        self.assertEqual(list(xrange(3)), [0, 1, 2])
        self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
        self.assertEqual(list(xrange(0)), [])
        self.assertEqual(list(xrange(-3)), [])
        self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7])
        self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4])
        a = 10
        b = 100
        c = 50
        self.assertEqual(list(xrange(a, a+2)), [a, a+1])
        self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1])
        self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2])
        seq = list(xrange(a, b, c))
        self.assertIn(a, seq)
        self.assertNotIn(b, seq)
        self.assertEqual(len(seq), 2)
        seq = list(xrange(b, a, -c))
        self.assertIn(b, seq)
        self.assertNotIn(a, seq)
        self.assertEqual(len(seq), 2)
        seq = list(xrange(-a, -b, -c))
        self.assertIn(-a, seq)
        self.assertNotIn(-b, seq)
        self.assertEqual(len(seq), 2)
        self.assertRaises(TypeError, xrange)
        self.assertRaises(TypeError, xrange, 1, 2, 3, 4)
        self.assertRaises(ValueError, xrange, 1, 2, 0)
        self.assertRaises(OverflowError, xrange, 10**100, 10**101, 10**101)
        self.assertRaises(TypeError, xrange, 0, "spam")
        self.assertRaises(TypeError, xrange, 0, 42, "spam")
        # xrange length/extent must fit in a native int (unlike py3 range).
        self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2)
        self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint)
        self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)
        r = xrange(-sys.maxint, sys.maxint, 2)
        self.assertEqual(len(r), sys.maxint)
        self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)
    def test_pickling(self):
        testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
                     (13, 21, 3), (-2, 2, 2)]
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for t in testcases:
                r = xrange(*t)
                self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
                                 list(r))
        M = min(sys.maxint, sys.maxsize)
        large_testcases = testcases + [
            (0, M, 1),
            (M, 0, -1),
            (0, M, M - 1),
            (M // 2, M, 1),
            (0, -M, -1),
            (0, -M, 1 - M),
            (-M, M, 2),
            (-M, M, 1024),
            (-M, M, 10585),
            (M, -M, -2),
            (M, -M, -1024),
            (M, -M, -10585),
        ]
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for t in large_testcases:
                r = xrange(*t)
                r_out = pickle.loads(pickle.dumps(r, proto))
                self.assert_xranges_equivalent(r_out, r)
    def test_repr(self):
        # Check that repr of an xrange is a valid representation
        # of that xrange.
        # Valid xranges have at most min(sys.maxint, sys.maxsize) elements.
        M = min(sys.maxint, sys.maxsize)
        testcases = [
            (13,),
            (0, 11),
            (-22, 10),
            (20, 3, -1),
            (13, 21, 3),
            (-2, 2, 2),
            (0, M, 1),
            (M, 0, -1),
            (0, M, M - 1),
            (M // 2, M, 1),
            (0, -M, -1),
            (0, -M, 1 - M),
            (-M, M, 2),
            (-M, M, 1024),
            (-M, M, 10585),
            (M, -M, -2),
            (M, -M, -1024),
            (M, -M, -10585),
        ]
        for t in testcases:
            r = xrange(*t)
            r_out = eval(repr(r))
            self.assert_xranges_equivalent(r, r_out)
    def test_range_iterators(self):
        # see issue 7298
        limits = [base + jiggle
                  for M in (2**32, 2**64)
                  for base in (-M, -M//2, 0, M//2, M)
                  for jiggle in (-2, -1, 0, 1, 2)]
        test_ranges = [(start, end, step)
                       for start in limits
                       for end in limits
                       for step in (-2**63, -2**31, -2, -1, 1, 2)]
        for start, end, step in test_ranges:
            try:
                iter1 = xrange(start, end, step)
            except OverflowError:
                pass
            else:
                iter2 = pyrange(start, end, step)
                test_id = "xrange({}, {}, {})".format(start, end, step)
                # check first 100 entries
                self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
            try:
                iter1 = reversed(xrange(start, end, step))
            except OverflowError:
                pass
            else:
                iter2 = pyrange_reversed(start, end, step)
                test_id = "reversed(xrange({}, {}, {}))".format(start, end, step)
                self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_main():
    # Python 2 test-runner entry point (test.test_support was renamed
    # test.support in Python 3).
    test.test_support.run_unittest(XrangeTest)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
FineUploader/server-examples | python/django-fine-uploader/fine_uploader/views.py | 1 | 4519 | import json
import logging
import os
import os.path
import shutil
from django.conf import settings
from django.http import HttpResponse, HttpRequest
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from fine_uploader.forms import UploadFileForm
from fine_uploader import utils
logger = logging.getLogger('django')
##
# Utils
##
def make_response(status=200, content_type='text/plain', content=None):
    """Build the HttpResponse for an upload request.

    Success is indicated by a status of 200 and { "success": true }
    contained in the content.

    Content-type defaults to text/plain because IE9 and below chokes on
    application/json; for CORS environments and IE9 and below the
    content-type needs to be text/html.
    """
    resp = HttpResponse()
    resp.status_code = status
    resp['Content-Type'] = content_type
    resp.content = content
    return resp
##
# Views
##
def home(request):
    """ The 'home' page. Returns an HTML page with Fine Uploader code
    ready to upload. This HTML page should contain your client-side code
    for instantiating and modifying Fine Uploader.
    """
    return render(request, 'fine_uploader/index.html')
class UploadView(View):
    """ View which will handle all upload requests sent by Fine Uploader.
    See: https://docs.djangoproject.com/en/dev/topics/security/#user-uploaded-content-security
    Handles POST and DELETE requests.
    """
    @csrf_exempt
    def dispatch(self, *args, **kwargs):
        # csrf_exempt is applied to dispatch() so the exemption covers every
        # HTTP verb this view handles (POST and DELETE alike).
        return super(UploadView, self).dispatch(*args, **kwargs)

    def post(self, request, *args, **kwargs):
        """A POST request. Validate the form and then handle the upload
        based on the POSTed data. Does not handle extra parameters yet.
        """
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            handle_upload(request.FILES['qqfile'], form.cleaned_data)
            return make_response(content=json.dumps({ 'success': True }))
        else:
            return make_response(status=400,
                content=json.dumps({
                    'success': False,
                    'error': '%s' % repr(form.errors)
                }))

    def delete(self, request, *args, **kwargs):
        """A DELETE request. If found, deletes a file with the corresponding
        UUID from the server's filesystem.
        """
        qquuid = kwargs.get('qquuid', '')
        if qquuid:
            try:
                handle_deleted_file(qquuid)
                return make_response(content=json.dumps({ 'success': True }))
            # Python 2 except syntax; this file is Python 2 throughout.
            except Exception, e:
                return make_response(status=400,
                    content=json.dumps({
                        'success': False,
                        'error': '%s' % repr(e)
                    }))
        return make_response(status=404,
            content=json.dumps({
                'success': False,
                'error': 'File not present'
            }))
def handle_upload(f, fileattrs):
    """ Handle a chunked or non-chunked upload.

    Saves the uploaded file (or one chunk of it) to disk; once the final
    chunk of a chunked upload arrives, reassembles the chunks into the
    destination file and removes the chunk directory.
    """
    logger.info(fileattrs)
    chunked = False
    dest_folder = os.path.join(settings.UPLOAD_DIRECTORY, fileattrs['qquuid'])
    dest = os.path.join(dest_folder, fileattrs['qqfilename'])
    # Chunked: more than one part means each part is stored under
    # CHUNKS_DIRECTORY/<uuid>/<filename>/<part-index> until all have arrived.
    if fileattrs.get('qqtotalparts') and int(fileattrs['qqtotalparts']) > 1:
        chunked = True
        dest_folder = os.path.join(settings.CHUNKS_DIRECTORY, fileattrs['qquuid'])
        dest = os.path.join(dest_folder, fileattrs['qqfilename'], str(fileattrs['qqpartindex']))
        logger.info('Chunked upload received')
    utils.save_upload(f, dest)
    logger.info('Upload saved: %s' % dest)
    # If the last chunk has been sent, combine the parts.
    # NOTE(review): the arithmetic assumes qqtotalparts/qqpartindex arrive as
    # ints in cleaned form data -- confirm against UploadFileForm field types.
    if chunked and (fileattrs['qqtotalparts'] - 1 == fileattrs['qqpartindex']):
        logger.info('Combining chunks: %s' % os.path.dirname(dest))
        utils.combine_chunks(fileattrs['qqtotalparts'],
            fileattrs['qqtotalfilesize'],
            source_folder=os.path.dirname(dest),
            dest=os.path.join(settings.UPLOAD_DIRECTORY, fileattrs['qquuid'], fileattrs['qqfilename']))
        logger.info('Combined: %s' % dest)
        # Remove CHUNKS_DIRECTORY/<uuid> now that the file is assembled.
        shutil.rmtree(os.path.dirname(os.path.dirname(dest)))
def handle_deleted_file(uuid):
    """ Handles a filesystem delete based on UUID: removes the whole
    upload directory for that UUID."""
    logger.info(uuid)
    target = os.path.join(settings.UPLOAD_DIRECTORY, uuid)
    shutil.rmtree(target)
| mit |
Tiger66639/ansible-modules-core | cloud/amazon/ec2_key.py | 17 | 7291 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment: aws
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import random
import string
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
key_material=dict(required=False),
state = dict(default='present', choices=['present', 'absent']),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
ec2 = ec2_connect(module)
# find the key if present
key = ec2.get_key_pair(name)
# Ensure requested key is absent
if state == 'absent':
if key:
'''found a match, delete it'''
try:
key.delete()
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if not ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be removed")
except Exception, e:
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
else:
key = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested key is present
elif state == 'present':
if key:
# existing key found
if key_material:
# EC2's fingerprints are non-trivial to generate, so push this key
# to a temporary name and make ec2 calculate the fingerprint for us.
#
# http://blog.jbrowne.com/?p=23
# https://forums.aws.amazon.com/thread.jspa?messageID=352828
# find an unused name
test = 'empty'
while test:
randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
tmpkeyname = "ansible-" + ''.join(randomchars)
test = ec2.get_key_pair(tmpkeyname)
# create tmp key
tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
# get tmp key fingerprint
tmpfingerprint = tmpkey.fingerprint
# delete tmp key
tmpkey.delete()
if key.fingerprint != tmpfingerprint:
if not module.check_mode:
key.delete()
key = ec2.import_key_pair(name, key_material)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be re-created")
changed = True
pass
# if the key doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
if key_material:
'''We are providing the key, need to import'''
key = ec2.import_key_pair(name, key_material)
else:
'''
No material provided, let AWS handle the key creation and
retrieve the private key
'''
key = ec2.create_key_pair(name)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be created")
changed = True
if key:
data = {
'name': key.name,
'fingerprint': key.fingerprint
}
if key.material:
data.update({'private_key': key.material})
module.exit_json(changed=changed, key=data)
else:
module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
Dark-Hacker/horizon | openstack_dashboard/test/test_data/exceptions.py | 14 | 3546 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ceilometerclient.exc as ceilometer_exceptions
from cinderclient import exceptions as cinder_exceptions
import glanceclient.exc as glance_exceptions
from keystoneclient import exceptions as keystone_exceptions
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exceptions
import six
from swiftclient import client as swift_exceptions
from troveclient import exceptions as trove_exceptions
from openstack_dashboard.test.test_data import utils
def create_stubbed_exception(cls, status_code=500):
    """Monkeypatch exception class *cls* for test use and return an instance.

    Replaces __init__/__str__/__unicode__ so the exception carries a canned
    message and a status code, marks it with silence_logging, then constructs
    an instance with the given status code.
    """
    msg = "Expected failure."

    def patched_init(self, code=None, message=None, **kwargs):
        if code is not None:
            # Some client exceptions store the code as http_status.
            if hasattr(self, 'http_status'):
                self.http_status = code
            else:
                self.code = code
        self.message = message or self.__class__.message
        try:
            # Neutron sometimes updates the message with additional
            # information, like a reason.
            self.message = self.message % kwargs
        except Exception:
            pass  # We still have the main error message.

    def patched_str(self):
        return str(self.message)

    def patched_unicode(self):
        return six.text_type(self.message)

    cls.__init__ = patched_init
    cls.__str__ = patched_str
    cls.__unicode__ = patched_unicode
    cls.silence_logging = True
    return cls(status_code, msg)
def data(TEST):
    """Attach a stubbed exception instance for every service client to
    TEST.exceptions (a TestDataContainer)."""
    TEST.exceptions = utils.TestDataContainer()
    unauth = keystone_exceptions.Unauthorized
    TEST.exceptions.keystone_unauthorized = create_stubbed_exception(unauth)
    keystone_exception = keystone_exceptions.ClientException
    TEST.exceptions.keystone = create_stubbed_exception(keystone_exception)
    nova_exception = nova_exceptions.ClientException
    TEST.exceptions.nova = create_stubbed_exception(nova_exception)
    nova_unauth = nova_exceptions.Unauthorized
    TEST.exceptions.nova_unauthorized = create_stubbed_exception(nova_unauth)
    glance_exception = glance_exceptions.ClientException
    TEST.exceptions.glance = create_stubbed_exception(glance_exception)
    ceilometer_exception = ceilometer_exceptions.HTTPException
    TEST.exceptions.ceilometer = create_stubbed_exception(ceilometer_exception)
    neutron_exception = neutron_exceptions.NeutronClientException
    TEST.exceptions.neutron = create_stubbed_exception(neutron_exception)
    swift_exception = swift_exceptions.ClientException
    TEST.exceptions.swift = create_stubbed_exception(swift_exception)
    cinder_exception = cinder_exceptions.BadRequest
    TEST.exceptions.cinder = create_stubbed_exception(cinder_exception)
    trove_exception = trove_exceptions.ClientException
    TEST.exceptions.trove = create_stubbed_exception(trove_exception)
    trove_auth = trove_exceptions.Unauthorized
    TEST.exceptions.trove_unauthorized = \
        create_stubbed_exception(trove_auth)
| apache-2.0 |
mKeRix/home-assistant | homeassistant/components/daikin/switch.py | 7 | 2240 | """Support for Daikin AirBase zones."""
import logging
from homeassistant.helpers.entity import ToggleEntity
from . import DOMAIN as DAIKIN_DOMAIN
_LOGGER = logging.getLogger(__name__)
ZONE_ICON = "mdi:home-circle"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Old way of setting up the platform.

    Can only be called when a user accidentally mentions the platform in their
    config. But even in that case it would have been ignored.
    """
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up Daikin zone switches based on config_entry."""
    api = hass.data[DAIKIN_DOMAIN][entry.entry_id]
    zones = api.device.zones
    if not zones:
        return
    entities = []
    for zone_id, zone in enumerate(zones):
        # ("-", "0") marks an unused zone slot; skip those.
        if zone != ("-", "0"):
            entities.append(DaikinZoneSwitch(api, zone_id))
    async_add_entities(entities)
class DaikinZoneSwitch(ToggleEntity):
    """Switch entity exposing a single Daikin AirBase zone."""

    def __init__(self, daikin_api, zone_id):
        """Store the API handle and the index of the zone this entity controls."""
        self._api = daikin_api
        self._zone_id = zone_id

    @property
    def unique_id(self):
        """Return a unique ID built from the device MAC and the zone index."""
        return f"{self._api.device.mac}-zone{self._zone_id}"

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return ZONE_ICON

    @property
    def name(self):
        """Return the entity name: device name plus the zone's label."""
        zone_label = self._api.device.zones[self._zone_id][0]
        return f"{self._api.name} {zone_label}"

    @property
    def is_on(self):
        """Return True when the zone's state flag is "1"."""
        zone = self._api.device.zones[self._zone_id]
        return zone[1] == "1"

    @property
    def device_info(self):
        """Return a device description for the device registry."""
        return self._api.device_info

    async def async_update(self):
        """Fetch the latest state from the device."""
        await self._api.async_update()

    async def async_turn_on(self, **kwargs):
        """Switch the zone on."""
        await self._api.device.set_zone(self._zone_id, "1")

    async def async_turn_off(self, **kwargs):
        """Switch the zone off."""
        await self._api.device.set_zone(self._zone_id, "0")
| mit |
eirmag/weboob | weboob/applications/qvideoob/main_window.py | 1 | 5387 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import SIGNAL
from weboob.capabilities.video import ICapVideo
from weboob.tools.application.qt import QtMainWindow, QtDo
from weboob.tools.application.qt.backendcfg import BackendCfg
from weboob.applications.qvideoob.ui.main_window_ui import Ui_MainWindow
from .video import Video
from .minivideo import MiniVideo
class MainWindow(QtMainWindow):
    """Main window of QVideoob.

    Wires the Qt UI (search box, URL box, NSFW/SFW filters, backend picker)
    to weboob video backends and manages the list of MiniVideo result widgets.
    """

    def __init__(self, config, weboob, parent=None):
        QtMainWindow.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.config = config
        self.weboob = weboob
        # MiniVideo result widgets currently present in the scroll area.
        self.minivideos = []
        # Settings are persisted as strings; cast to int for the Qt widgets.
        self.ui.sortbyEdit.setCurrentIndex(int(self.config.get('settings', 'sortby')))
        self.ui.nsfwCheckBox.setChecked(int(self.config.get('settings', 'nsfw')))
        self.ui.sfwCheckBox.setChecked(int(self.config.get('settings', 'sfw')))
        self.connect(self.ui.searchEdit, SIGNAL("returnPressed()"), self.search)
        self.connect(self.ui.urlEdit, SIGNAL("returnPressed()"), self.openURL)
        self.connect(self.ui.nsfwCheckBox, SIGNAL("stateChanged(int)"), self.nsfwChanged)
        self.connect(self.ui.sfwCheckBox, SIGNAL("stateChanged(int)"), self.sfwChanged)
        self.connect(self.ui.actionBackends, SIGNAL("triggered()"), self.backendsConfig)
        self.loadBackendsList()
        # Without any configured backend the application is unusable, so
        # open the backend configuration dialog right away.
        if self.ui.backendEdit.count() == 0:
            self.backendsConfig()

    def backendsConfig(self):
        """Open the backend configuration dialog; reload the list on change."""
        bckndcfg = BackendCfg(self.weboob, (ICapVideo,), self)
        if bckndcfg.run():
            self.loadBackendsList()

    def loadBackendsList(self):
        """Fill the backend combo box ('All backends' plus one entry each)."""
        self.ui.backendEdit.clear()
        for i, backend in enumerate(self.weboob.iter_backends()):
            if i == 0:
                # Index 0 holds the pseudo-entry that queries every backend
                # (its item data is the empty string).
                self.ui.backendEdit.addItem('All backends', '')
            self.ui.backendEdit.addItem(backend.name, backend.name)
            if backend.name == self.config.get('settings', 'backend'):
                # i+1 accounts for the 'All backends' entry at index 0.
                self.ui.backendEdit.setCurrentIndex(i+1)
        # Disable the search/URL inputs when no backend is available.
        if self.ui.backendEdit.count() == 0:
            self.ui.searchEdit.setEnabled(False)
            self.ui.urlEdit.setEnabled(False)
        else:
            self.ui.searchEdit.setEnabled(True)
            self.ui.urlEdit.setEnabled(True)

    def nsfwChanged(self, state):
        """Persist the NSFW filter setting and refresh result visibility."""
        self.config.set('settings', 'nsfw', int(self.ui.nsfwCheckBox.isChecked()))
        self.updateVideosDisplay()

    def sfwChanged(self, state):
        """Persist the SFW filter setting and refresh result visibility."""
        self.config.set('settings', 'sfw', int(self.ui.sfwCheckBox.isChecked()))
        self.updateVideosDisplay()

    def updateVideosDisplay(self):
        """Show or hide result widgets according to the NSFW/SFW checkboxes."""
        for minivideo in self.minivideos:
            if (minivideo.video.nsfw and self.ui.nsfwCheckBox.isChecked() or
                not minivideo.video.nsfw and self.ui.sfwCheckBox.isChecked()):
                minivideo.show()
            else:
                minivideo.hide()

    def search(self):
        """Start an asynchronous video search for the typed pattern."""
        pattern = unicode(self.ui.searchEdit.text())
        if not pattern:
            return
        # Drop the previous results before launching a new query.
        for minivideo in self.minivideos:
            self.ui.scrollAreaContent.layout().removeWidget(minivideo)
            minivideo.hide()
            minivideo.deleteLater()
        self.minivideos = []
        # Re-enabled in addVideo() once the last result has arrived.
        self.ui.searchEdit.setEnabled(False)
        backend_name = str(self.ui.backendEdit.itemData(self.ui.backendEdit.currentIndex()).toString())
        self.process = QtDo(self.weboob, self.addVideo)
        self.process.do('search_videos', pattern, self.ui.sortbyEdit.currentIndex(), nsfw=True, max_results=20, backends=backend_name)

    def addVideo(self, backend, video):
        """Per-result callback for the search; backend=None signals the end."""
        if not backend:
            self.ui.searchEdit.setEnabled(True)
            self.process = None
            return
        minivideo = MiniVideo(self.weboob, backend, video)
        self.ui.scrollAreaContent.layout().addWidget(minivideo)
        self.minivideos.append(minivideo)
        # Hide results filtered out by the current NSFW/SFW settings.
        if (video.nsfw and not self.ui.nsfwCheckBox.isChecked() or
            not video.nsfw and not self.ui.sfwCheckBox.isChecked()):
            minivideo.hide()

    def openURL(self):
        """Open a player window for the URL typed by the user."""
        url = unicode(self.ui.urlEdit.text())
        if not url:
            return
        for backend in self.weboob.iter_backends():
            video = backend.get_video(url)
            if video:
                video_widget = Video(video, self)
                video_widget.show()
        self.ui.urlEdit.clear()

    def closeEvent(self, ev):
        """Persist the current settings when the window is closed."""
        self.config.set('settings', 'backend', str(self.ui.backendEdit.itemData(self.ui.backendEdit.currentIndex()).toString()))
        self.config.set('settings', 'sortby', self.ui.sortbyEdit.currentIndex())
        self.config.save()
        ev.accept()
| agpl-3.0 |
dendisuhubdy/tensorflow | tensorflow/contrib/boosted_trees/examples/mnist.py | 64 | 5840 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates multiclass MNIST TF Boosted trees example.
This example demonstrates how to run experiments with TF Boosted Trees on
a MNIST dataset. We are using layer by layer boosting with diagonal hessian
strategy for multiclass handling, and cross entropy loss.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/mnist.py \
--output_dir="/tmp/mnist" --depth=4 --learning_rate=0.3 --batch_size=60000 \
--examples_per_layer=60000 --eval_batch_size=10000 --num_eval_steps=1 \
--num_trees=10 --l2=1 --vmodule=training_ops=1
When training is done, accuracy on eval data is reported. Point tensorboard
to the directory for the run to see how the training progresses:
tensorboard --logdir=/tmp/mnist
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.learn import learn_runner
def get_input_fn(dataset_split,
                 batch_size,
                 capacity=10000,
                 min_after_dequeue=3000):
  """Build an Estimator input_fn that shuffles batches of MNIST data."""

  def _shuffled_batch_fn():
    """Return ({"images": batch}, labels) drawn from a shuffle queue."""
    tensors = [
        dataset_split.images,
        dataset_split.labels.astype(np.int32),
    ]
    images, labels = tf.train.shuffle_batch(
        tensors=tensors,
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        enqueue_many=True,
        num_threads=4)
    return {"images": images}, labels

  return _shuffled_batch_fn
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir):
  """Build a TF Boosted Trees classifier configured from command-line flags."""
  num_classes = 10

  # Learner configuration: layer-by-layer growth with diagonal-hessian
  # multiclass handling and L2 regularization scaled per layer.
  learner_config = learner_pb2.LearnerConfig()
  learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
  learner_config.num_classes = num_classes
  learner_config.regularization.l1 = 0.0
  learner_config.regularization.l2 = FLAGS.l2 / FLAGS.examples_per_layer
  learner_config.constraints.max_tree_depth = FLAGS.depth
  learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
  learner_config.multi_class_strategy = (
      learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)

  run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)

  # Create a TF Boosted trees estimator that can take in custom loss.
  return GradientBoostedDecisionTreeClassifier(
      learner_config=learner_config,
      n_classes=num_classes,
      examples_per_layer=FLAGS.examples_per_layer,
      model_dir=output_dir,
      num_trees=FLAGS.num_trees,
      center_bias=False,
      config=run_config)
def _make_experiment_fn(output_dir):
  """Create a train/eval Experiment for the gradient boosted trees model."""
  mnist_data = tf.contrib.learn.datasets.mnist.load_mnist()
  return tf.contrib.learn.Experiment(
      estimator=_get_tfbt(output_dir),
      train_input_fn=get_input_fn(mnist_data.train, FLAGS.batch_size),
      eval_input_fn=get_input_fn(mnist_data.validation, FLAGS.eval_batch_size),
      train_steps=None,
      eval_steps=FLAGS.num_eval_steps,
      eval_metrics=None)
def main(unused_argv):
  """Entry point: run the train-and-evaluate schedule via learn_runner."""
  learn_runner.run(
      experiment_fn=_make_experiment_fn,
      output_dir=FLAGS.output_dir,
      schedule="train_and_evaluate")
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  parser = argparse.ArgumentParser()
  # Define the list of flags that users can change.
  parser.add_argument(
      "--output_dir",
      type=str,
      required=True,
      help="Choose the dir for the output.")
  parser.add_argument(
      "--batch_size",
      type=int,
      default=1000,
      help="The batch size for reading data.")
  parser.add_argument(
      "--eval_batch_size",
      type=int,
      default=1000,
      help="Size of the batch for eval.")
  parser.add_argument(
      "--num_eval_steps",
      type=int,
      default=1,
      help="The number of steps to run evaluation for.")
  # Flags for gradient boosted trees config.
  parser.add_argument(
      "--depth", type=int, default=4, help="Maximum depth of weak learners.")
  parser.add_argument(
      "--l2", type=float, default=1.0, help="l2 regularization per batch.")
  parser.add_argument(
      "--learning_rate",
      type=float,
      default=0.1,
      help="Learning rate (shrinkage weight) with which each new tree is added."
  )
  parser.add_argument(
      "--examples_per_layer",
      type=int,
      default=1000,
      help="Number of examples to accumulate stats for per layer.")
  parser.add_argument(
      "--num_trees",
      type=int,
      default=None,
      required=True,
      help="Number of trees to grow before stopping.")
  # FLAGS is read as a module-level global by the functions above; any
  # unparsed flags are forwarded so TF can consume its own arguments.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
andrewslotin/GitRebase | git.py | 1 | 1964 | import sublime
import os, shlex
import subprocess
class Git:
    """Thin wrapper around the command-line git client used by GitRebase.

    Commands run in the working directory passed to the constructor; the
    binary can be overridden via the ``git_command`` plugin setting.
    """

    def __init__(self, cwd):
        self._cwd = cwd
        self._git = "git"
        s = sublime.load_settings("GitRebase.sublime-settings")
        if s.get('git_command'):
            self._git = s.get('git_command')

    def log(self, limit=None):
        """Return [(sha, subject), ...] for commits ahead of master.

        On master itself the whole history is listed; *limit*, when given,
        caps the number of commits returned.
        """
        git_command = "log --oneline --no-color --no-abbrev-commit"
        current_branch = self.current_branch()
        if current_branch != "master":
            git_command += " master..{}".format(current_branch)
        if limit is not None:
            git_command += " -n {}".format(limit)
        sublime.status_message(git_command)
        history = self._run(git_command)
        return [tuple(line.split(" ", 1)) for line in history.splitlines()]

    def edit_revision(self, rev):
        """Start an interactive rebase with *rev* marked for editing."""
        # GIT_SEQUENCE_EDITOR rewrites the rebase todo list non-interactively,
        # flipping the "pick" line for this commit to "edit".  Raw string so
        # the \s in the sed pattern is not treated as a Python escape.
        self._run("rebase --autostash --interactive {}~1".format(rev),
                  GIT_SEQUENCE_EDITOR=r"sed -i '' -e 's/^\s*pick {0}/edit {0}/g'".format(rev[0:7]))

    def abort_rebase(self):
        """Abandon the rebase in progress and restore the original branch."""
        self._run("rebase --abort")

    def continue_rebase(self):
        """Resume the rebase after the user has finished editing."""
        self._run("rebase --continue")

    def stash_changes(self):
        """Stash uncommitted changes."""
        self._run("stash")

    def apply_stash(self):
        """Pop the most recent stash back onto the working tree."""
        self._run("stash pop")

    def current_branch(self):
        """Return the name of the currently checked-out branch."""
        return self._run("rev-parse --abbrev-ref HEAD")

    def is_clean(self):
        """Return True when the working tree has no tracked modifications."""
        return len(self._run("status -s -uno")) == 0

    def _run(self, command, **env_vars):
        """Run ``git <command>`` and return its stdout, stripped and decoded.

        *env_vars* are merged into the inherited environment.  On timeout
        (5 seconds) the process is killed and an empty string is returned.
        """
        cwd = self._cwd
        if cwd is not None and not os.path.isdir(self._cwd):
            cwd = None
        environ = os.environ.copy()
        for (var, val) in env_vars.items():
            environ[var] = val
        cmd = [self._git] + shlex.split(command)
        # Bug fix: initialise with bytes, not str - communicate() returns
        # bytes, and the timeout path used to call .decode() on a str.
        output = b""
        with subprocess.Popen(cmd, stdin=subprocess.DEVNULL, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, env=environ) as git:
            try:
                [output, error] = git.communicate(timeout=5)
                if error:
                    print(error.decode('utf-8'))
            except subprocess.TimeoutExpired:
                # Bug fix: this used to reference an undefined name GIT,
                # raising NameError instead of reporting the timeout.
                print("`{0} {1}` timed out".format(self._git, command))
                # Kill the stuck process so the context manager's wait()
                # does not block forever (per the subprocess docs).
                git.kill()
                git.communicate()
        return output.strip().decode('utf-8')
| mit |
shrimpboyho/git.js | emscript/python/2.7.5.1_32bit/Lib/site-packages/win32comext/ifilter/demo/filterDemo.py | 18 | 11010 | import pythoncom
import pywintypes
from win32com.ifilter import ifilter
from win32com.ifilter.ifiltercon import *
from win32com import storagecon
class FileParser:
    """Extract the text body and OLE properties of a document through the
    Windows IFilter COM interface (Python 2 / pywin32 only).

    Files may be plain files or structured-storage (OLE compound) documents;
    the appropriate binding is chosen in _bind_to_filter().
    """

    # Property IDs for the Storage Property Set
    PIDS_BODY = 0x00000013

    # property IDs for HTML Storage Property Set
    PIDH_DESCRIPTION = "DESCRIPTION"
    PIDH_HREF = "A.HREF"
    PIDH_IMGSRC = "IMG.SRC"

    # conversion map to convert ifilter properties to more user friendly names
    propertyToName = {PSGUID_STORAGE : {PIDS_BODY : 'body'},
                      PSGUID_SUMMARYINFORMATION : {PIDSI_TITLE : 'title',
                                                   PIDSI_SUBJECT : 'description',
                                                   PIDSI_AUTHOR : 'author',
                                                   PIDSI_KEYWORDS : 'keywords',
                                                   PIDSI_COMMENTS : 'comments'},
                      PSGUID_HTMLINFORMATION : {PIDH_DESCRIPTION : 'description'},
                      PSGUID_HTML2_INFORMATION : {PIDH_HREF : 'href',
                                                  PIDH_IMGSRC : 'img'}
                      }

    def __init__(self, verbose=False):
        # self.f holds the IFilter interface; self.stg the IStorage
        # (compound-storage documents only, otherwise None).
        self.f = None
        self.stg = None
        self.verbose = verbose

    def Close(self):
        """Drop the COM references so the underlying file is released."""
        self.f = None
        self.stg = None

    def Parse(self, fileName, maxErrors=10):
        """Parse *fileName* and return a dict of {property name: value}.

        Text properties accumulate as lists of text chunks; up to
        *maxErrors* consecutive embedding/link failures are tolerated
        before the error is propagated.
        """
        properties = {}
        try:
            self._bind_to_filter(fileName)
            try:
                flags = self.f.Init(IFILTER_INIT_APPLY_INDEX_ATTRIBUTES | IFILTER_INIT_APPLY_OTHER_ATTRIBUTES)
                if flags == IFILTER_FLAGS_OLE_PROPERTIES and self.stg is not None:
                    self._trace('filter requires to get properities via ole')
                    self._get_properties(properties)
                errCnt = 0
                while True:
                    try:
                        # each chunk returns a tuple with the following:-
                        # idChunk = The chunk identifier. each chunk has a unique identifier
                        # breakType = The type of break that separates the previous chunk from the current chunk. Values are:-
                        #             CHUNK_NO_BREAK=0,CHUNK_EOW=1,CHUNK_EOS= 2,CHUNK_EOP= 3,CHUNK_EOC= 4
                        # flags = Flags indicate whether this chunk contains a text-type or a value-type property
                        # locale = The language and sublanguage associated with a chunk of text
                        # attr = A tuple containing the property to be applied to the chunk. Tuple is (propertyset GUID, property ID)
                        #        Property ID can be a number or string
                        # idChunkSource = The ID of the source of a chunk. The value of the idChunkSource member depends on the nature of the chunk
                        # startSource = The offset from which the source text for a derived chunk starts in the source chunk
                        # lenSource = The length in characters of the source text from which the current chunk was derived.
                        #             A zero value signifies character-by-character correspondence between the source text and the derived text.
                        idChunk, breakType, flags, locale, attr, idChunkSource, startSource,lenSource = self.f.GetChunk()
                        self._trace("Chunk details:", idChunk, breakType, flags, locale, attr, idChunkSource, startSource,lenSource)
                        # attempt to map each property to a more user friendly name. If we don't know what it is just return
                        # the set guid and property id. (note: the id can be a number or a string.
                        propSet = self.propertyToName.get(attr[0])
                        if propSet:
                            propName = propSet.get(attr[1], '%s:%s' % attr)
                        else:
                            propName = '%s:%s' % attr
                    except pythoncom.com_error, e:
                        if e[0] == FILTER_E_END_OF_CHUNKS:
                            # we have read all the chunks
                            break
                        elif e[0] in [FILTER_E_EMBEDDING_UNAVAILABLE, FILTER_E_LINK_UNAVAILABLE]:
                            # the next chunk can't be read. Also keep track of the number of times we
                            # fail as some filters (ie. the Msoft office ones can get stuck here)
                            errCnt += 1
                            if errCnt > maxErrors:
                                raise
                            else:
                                continue
                        elif e[0] == FILTER_E_ACCESS:
                            self._trace('Access denied')
                            raise
                        elif e[0] == FILTER_E_PASSWORD:
                            self._trace('Password required')
                            raise
                        else:
                            # any other type of error really can't be recovered from
                            raise
                    # reset consecutive errors (some filters may get stuck in a lopp if embedding or link failures occurs
                    errCnt = 0
                    if flags == CHUNK_TEXT:
                        # its a text segment - get all available text for this chunk.
                        body_chunks = properties.setdefault(propName, [])
                        self._get_text(body_chunks)
                    elif flags == CHUNK_VALUE:
                        # its a data segment - get the value
                        properties[propName] = self.f.GetValue()
                    else:
                        self._trace("Unknown flag returned by GetChunk:", flags)
            finally:
                self.Close()
        except pythoncom.com_error, e:
            self._trace("ERROR processing file", e)
            raise
        return properties

    def _bind_to_filter(self, fileName):
        """
        See if the file is a structured storage file or a normal file
        and then return an ifilter interface by calling the appropriate bind/load function
        """
        if pythoncom.StgIsStorageFile(fileName):
            self.stg = pythoncom.StgOpenStorage(fileName, None, storagecon.STGM_READ | storagecon.STGM_SHARE_DENY_WRITE)
            try:
                self.f = ifilter.BindIFilterFromStorage(self.stg)
            except pythoncom.com_error, e:
                if e[0] == -2147467262: # 0x80004002: # no interface, try the load interface (this happens for some MSoft files)
                    self.f = ifilter.LoadIFilter(fileName)
                else:
                    raise
        else:
            self.f = ifilter.LoadIFilter(fileName)
            self.stg = None

    def _get_text(self, body_chunks):
        """
        Gets all the text for a particular chunk. We need to keep calling get text till all the
        segments for this chunk are retrieved
        """
        while True:
            try:
                body_chunks.append(self.f.GetText())
            except pythoncom.com_error, e:
                # NOTE(review): FILTER_E_NO_MORE_TEXT appears twice in this
                # list; possibly one occurrence was meant to be a different
                # error code - confirm against the IFilter documentation.
                if e[0] in [FILTER_E_NO_MORE_TEXT, FILTER_E_NO_MORE_TEXT, FILTER_E_NO_TEXT]:
                    break
                else:
                    raise # not one of the values we were expecting

    def _get_properties(self, properties):
        """
        Use OLE property sets to get base properties
        """
        try:
            pss = self.stg.QueryInterface(pythoncom.IID_IPropertySetStorage )
        except pythoncom.com_error, e:
            self._trace('No Property information could be retrieved', e)
            return
        ps = pss.Open(PSGUID_SUMMARYINFORMATION)
        props = (PIDSI_TITLE, PIDSI_SUBJECT, PIDSI_AUTHOR, PIDSI_KEYWORDS, PIDSI_COMMENTS)
        title, subject, author, keywords, comments = ps.ReadMultiple(props)
        # Only store properties that are actually present in the document.
        if title is not None:
            properties['title'] = title
        if subject is not None:
            properties['description'] = subject
        if author is not None:
            properties['author'] = author
        if keywords is not None:
            properties['keywords'] = keywords
        if comments is not None:
            properties['comments'] = comments

    def _trace(self, *args):
        # Best-effort debug printing; IOError is swallowed so that tracing
        # can never break parsing (e.g. when stdout has been closed).
        if self.verbose:
            ret = ' '.join([str(arg) for arg in args])
            try:
                print ret
            except IOError:
                pass
def _usage():
    """Print usage/help text for running this demo from the command line."""
    import os
    print "Usage: %s filename [verbose [dumpbody]]" % (os.path.basename(sys.argv[0]),)
    print
    print "Where:-"
    print "filename = name of the file to extract text & properties from"
    print "verbose = 1=debug output, 0=no debug output (default=0)"
    print "dumpbody = 1=print text content, 0=don't print content (default=1)"
    print
    print "e.g. to dump a word file called spam.doc go:- filterDemo.py spam.doc"
    print
    print "by default .htm, .txt, .doc, .dot, .xls, .xlt, .ppt are supported"
    print "you can filter .pdf's by downloading adobes ifilter component. "
    print "(currently found at http://download.adobe.com/pub/adobe/acrobat/win/all/ifilter50.exe)."
    print "ifilters for other filetypes are also available."
    print
    print "This extension is only supported on win2000 & winXP - because thats the only"
    print "place the ifilter stuff is supported. For more info on the API check out "
    print "MSDN under ifilters"
if __name__ == "__main__":
    import sys
    import operator
    # Defaults: no file name, quiet tracing, dump the body text.
    fName = ''
    verbose = False
    bDumpBody =True
    if len(sys.argv) < 2:
        _usage()
        sys.exit(1)
    try:
        fName = sys.argv[1]
        verbose = (sys.argv[2]!="0")
        bDumpBody = (sys.argv[3]!="0")
    except:
        # Optional arguments may be absent; keep the defaults above
        # (deliberate best-effort parsing, not an error).
        pass
    p = FileParser(verbose)
    propMap = p.Parse(fName)
    if bDumpBody:
        print "Body"
        ch = ' '.join(propMap.get('body', []))
        try:
            print ch
        except UnicodeError:
            # The console may not handle the text's encoding; degrade
            # gracefully rather than crash.
            print ch.encode('iso8859-1','ignore')
    print "Properties"
    for propName, propValue in propMap.iteritems():
        print propName,":",
        if propName == 'body':
            # The body can be huge - report only its total length.
            print "<%s length: %d>" % (propName, reduce(operator.add, [len(p) for p in propValue]),)
        elif type(propValue) == type([]):
            print
            for pv in propValue:
                print pv
        else:
            print propValue
        print
nathanial/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/select_related_regress/tests.py | 51 | 6336 | from django.test import TestCase
from regressiontests.select_related_regress.models import *
class SelectRelatedRegressTests(TestCase):
    """Regression tests for select_related(): join aliasing, empty related
    rows, multi-table inheritance and interaction with defer()/only()."""

    def test_regression_7110(self):
        """
        Regression test for bug #7110.
        When using select_related(), we must query the
        Device and Building tables using two different aliases (each) in order to
        differentiate the start and end Connection fields. The net result is that
        both the "connections = ..." queries here should give the same results
        without pulling in more than the absolute minimum number of tables
        (history has shown that it's easy to make a mistake in the implementation
        and include some unnecessary bonus joins).
        """
        b=Building.objects.create(name='101')
        dev1=Device.objects.create(name="router", building=b)
        dev2=Device.objects.create(name="switch", building=b)
        dev3=Device.objects.create(name="server", building=b)
        port1=Port.objects.create(port_number='4',device=dev1)
        port2=Port.objects.create(port_number='7',device=dev2)
        port3=Port.objects.create(port_number='1',device=dev3)
        c1=Connection.objects.create(start=port1, end=port2)
        c2=Connection.objects.create(start=port2, end=port3)
        # Same filter with and without select_related() must return the
        # same rows.
        connections=Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
        self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections],
            [(c1.id, u'router/4', u'switch/7'), (c2.id, u'switch/7', u'server/1')])
        connections=Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
        self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections],
            [(c1.id, u'router/4', u'switch/7'), (c2.id, u'switch/7', u'server/1')])
        # This final query should only join seven tables (port, device and building
        # twice each, plus connection once).
        self.assertEqual(connections.query.count_active_tables(), 7)

    def test_regression_8106(self):
        """
        Regression test for bug #8106.
        Same sort of problem as the previous test, but this time there are
        more extra tables to pull in as part of the select_related() and some
        of them could potentially clash (so need to be kept separate).
        """
        us = TUser.objects.create(name="std")
        usp = Person.objects.create(user=us)
        uo = TUser.objects.create(name="org")
        uop = Person.objects.create(user=uo)
        s = Student.objects.create(person = usp)
        o = Organizer.objects.create(person = uop)
        c = Class.objects.create(org=o)
        e = Enrollment.objects.create(std=s, cls=c)
        e_related = Enrollment.objects.all().select_related()[0]
        self.assertEqual(e_related.std.person.user.name, u"std")
        self.assertEqual(e_related.cls.org.person.user.name, u"org")

    def test_regression_8036(self):
        """
        Regression test for bug #8036
        the first related model in the tests below
        ("state") is empty and we try to select the more remotely related
        state__country. The regression here was not skipping the empty column results
        for country before getting status.
        """
        australia = Country.objects.create(name='Australia')
        active = ClientStatus.objects.create(name='active')
        # Note: the client deliberately has no state set.
        client = Client.objects.create(name='client', status=active)
        self.assertEqual(client.status, active)
        self.assertEqual(Client.objects.select_related()[0].status, active)
        self.assertEqual(Client.objects.select_related('state')[0].status, active)
        self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
        self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
        self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
        self.assertEqual(Client.objects.select_related('status')[0].status, active)

    def test_multi_table_inheritance(self):
        """ Exercising select_related() with multi-table model inheritance. """
        c1 = Child.objects.create(name="child1", value=42)
        i1 = Item.objects.create(name="item1", child=c1)
        i2 = Item.objects.create(name="item2")
        self.assertQuerysetEqual(
            Item.objects.select_related("child").order_by("name"),
            ["<Item: item1>", "<Item: item2>"]
        )

    def test_regression_12851(self):
        """
        Regression for #12851
        Deferred fields are used correctly if you select_related a subset
        of fields.
        """
        australia = Country.objects.create(name='Australia')
        active = ClientStatus.objects.create(name='active')
        wa = State.objects.create(name="Western Australia", country=australia)
        c1 = Client.objects.create(name='Brian Burke', state=wa, status=active)
        burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
        self.assertEqual(burke.name, u'Brian Burke')
        self.assertEqual(burke.state.name, u'Western Australia')
        # Still works if we're dealing with an inherited class
        sc1 = SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
        troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
        self.assertEqual(troy.name, u'Troy Buswell')
        self.assertEqual(troy.value, 42)
        self.assertEqual(troy.state.name, u'Western Australia')
        # Still works if we defer an attribute on the inherited class
        troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
        self.assertEqual(troy.name, u'Troy Buswell')
        self.assertEqual(troy.value, 42)
        self.assertEqual(troy.state.name, u'Western Australia')
        # Also works if you use only, rather than defer
        troy = SpecialClient.objects.select_related('state').only('name').get(name='Troy Buswell')
        self.assertEqual(troy.name, u'Troy Buswell')
        self.assertEqual(troy.value, 42)
        self.assertEqual(troy.state.name, u'Western Australia')
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/test/test_int_literal.py | 138 | 9128 | """Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
from test import test_support
class TestHexOctBin(unittest.TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648L)
self.assertEqual(0xffffffff, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648L)
self.assertEqual(-(0xffffffff), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648L)
self.assertEqual(-0xffffffff, -4294967295L)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808L)
self.assertEqual(0xffffffffffffffff, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808L)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808L)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615L)
def test_oct_baseline(self):
# Baseline tests
self.assertEqual(00, 0)
self.assertEqual(020, 16)
self.assertEqual(017777777777, 2147483647)
self.assertEqual(0777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(00), 0)
self.assertEqual(-(020), -16)
self.assertEqual(-(017777777777), -2147483647)
self.assertEqual(-(0777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-00, 0)
self.assertEqual(-020, -16)
self.assertEqual(-017777777777, -2147483647)
self.assertEqual(-0777777777777777777777, -9223372036854775807)
def test_oct_baseline_new(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(020000000000, 2147483648L)
self.assertEqual(037777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(020000000000), -2147483648L)
self.assertEqual(-(037777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-020000000000, -2147483648L)
self.assertEqual(-037777777777, -4294967295L)
# Positive constants
self.assertEqual(01000000000000000000000, 9223372036854775808L)
self.assertEqual(01777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(01000000000000000000000), -9223372036854775808L)
self.assertEqual(-(01777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-01000000000000000000000, -9223372036854775808L)
self.assertEqual(-01777777777777777777777, -18446744073709551615L)
def test_oct_unsigned_new(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648L)
self.assertEqual(0o37777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648L)
self.assertEqual(-(0o37777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648L)
self.assertEqual(-0o37777777777, -4294967295L)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808L)
self.assertEqual(0o1777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808L)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615L)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
    """Binary literals just past the signed 32-bit and 64-bit limits.

    NOTE: Python 2 only -- the expected values use the legacy ``L`` long
    literal suffix, which is a syntax error under Python 3.
    """
    # Positive constants
    self.assertEqual(0b10000000000000000000000000000000, 2147483648L)
    self.assertEqual(0b11111111111111111111111111111111, 4294967295L)
    # Ditto with a minus sign and parentheses
    self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L)
    self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L)
    # Ditto with a minus sign and NO parentheses
    # This failed in Python 2.2 through 2.2.2 and in 2.3a1
    self.assertEqual(-0b10000000000000000000000000000000, -2147483648L)
    self.assertEqual(-0b11111111111111111111111111111111, -4294967295L)
    # Positive constants
    self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L)
    self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L)
    # Ditto with a minus sign and parentheses
    self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L)
    self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L)
    # Ditto with a minus sign and NO parentheses
    # This failed in Python 2.2 through 2.2.2 and in 2.3a1
    self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L)
    self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L)
def test_main():
    """Entry point: run the TestHexOctBin test case via test_support."""
    test_support.run_unittest(TestHexOctBin)

if __name__ == "__main__":
    test_main()
| mit |
designatednerd/Vokoder | SampleProject/Pods/xUnique/setup.py | 15 | 2107 | #!/usr/bin/env python
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Xiao Wang <wangxiao8611@gmail.com, http://fclef.wordpress.com/about>
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
# setuptools is a hard requirement (for entry_points console scripts);
# fail with a clear message instead of an ImportError traceback.
try:
    from setuptools import setup
except ImportError:
    raise SystemExit('Please install "setuptools" module first.')

from os import path
from codecs import open as copen

# Read the long description from README.rst located next to this script.
here = path.dirname(path.abspath(__file__))
readme_path = path.join(here, 'README.rst')
with copen(readme_path, encoding='utf-8') as f:
    long_description = f.read()

# Package version; single source of truth for setup() below.
__version__ = '4.1.2'

setup(
    name="xUnique",
    version=__version__,
    py_modules=['xUnique'],
    entry_points = {
        'console_scripts' : [ 'xunique=xUnique:main' ],
    },
    description='A converter of the Xcode project file to make merging it much easier in VCS',
    long_description=long_description,
    author='Xiao Wang',
    author_email='wangxiao8611@gmail.com',
    url='https://github.com/truebit/xUnique',
    license='Apache License, Version 2.0',
    keywords=['Xcode project file', 'pbxproj', 'resolve merge conflict'],
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: Apache Software License',
                 'Topic :: Software Development :: Build Tools',
                 'Topic :: Software Development :: Version Control',
                 'Programming Language :: Objective C',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 2.7'
                 ],
)
| mit |
poojanihalani/xmlutils.py | setup.py | 3 | 1088 | #!/usr/bin/env python
from setuptools import setup
# Package metadata and console-script entry points for xmlutils.
setup(
    name="xmlutils",
    version="1.1",
    description="A set of utilities for processing XML documents and converting to other formats",
    author="Kailash Nadh",
    author_email="kailash.nadh@gmail.com",
    url="http://nadh.in/code/xmlutils.py",
    packages=['xmlutils'],
    download_url="http://github.com/knadh/xmlutils.py",
    license="MIT License",
    entry_points = {
        'console_scripts': [
            'xml2sql = xmlutils.console:run_xml2sql',
            'xml2csv = xmlutils.console:run_xml2csv',
            'xml2json = xmlutils.console:run_xml2json',
            'xmltable2csv = xmlutils.console:run_xmltable2csv'
        ],
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup :: XML",
        "Topic :: Software Development :: Libraries"
    ]
)
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sympy/polys/tests/test_sqfreetools.py | 6 | 5117 | """Tests for square-free decomposition algorithms and related tools. """
from sympy.polys.sqfreetools import (
dup_sqf_p, dmp_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part,
dup_sqf_list, dup_sqf_list_include,
dmp_sqf_list, dmp_sqf_list_include,
dup_gff_list, dmp_gff_list)
from sympy.polys.euclidtools import (
dmp_resultant)
from sympy.polys.densearith import (
dmp_neg, dmp_sub, dmp_mul, dmp_sqr)
from sympy.polys.densetools import (
dmp_diff)
from sympy.polys.polyclasses import (
DMP
)
from sympy.polys.polyerrors import (
DomainError)
from sympy.polys.specialpolys import (
f_0, f_1, f_2, f_3, f_4, f_5, f_6)
from sympy.polys.domains import FF, ZZ, QQ
from sympy.abc import x
from sympy.utilities.pytest import raises
def test_dup_sqf():
    """Square-free predicate/part/list over dense univariate polynomials.

    NOTE: written for Python 2-era SymPy -- ``map(K, ...)`` is expected to
    return a list, and coefficients use the legacy dense-list representation.
    """
    # Degenerate inputs: zero and constant polynomials are square-free.
    assert dup_sqf_part([], ZZ) == []
    assert dup_sqf_p([], ZZ) == True
    assert dup_sqf_part([7], ZZ) == [1]
    assert dup_sqf_p([7], ZZ) == True
    assert dup_sqf_part([2,2], ZZ) == [1,1]
    assert dup_sqf_p([2,2], ZZ) == True
    assert dup_sqf_part([1,0,1,1], ZZ) == [1,0,1,1]
    assert dup_sqf_p([1,0,1,1], ZZ) == True
    assert dup_sqf_part([-1,0,1,1], ZZ) == [1,0,-1,-1]
    assert dup_sqf_p([-1,0,1,1], ZZ) == True
    # Polynomials with a repeated x factor are not square-free.
    assert dup_sqf_part([2,3,0,0], ZZ) == [2,3,0]
    assert dup_sqf_p([2,3,0,0], ZZ) == False
    assert dup_sqf_part([-2,3,0,0], ZZ) == [2,-3,0]
    assert dup_sqf_p([-2,3,0,0], ZZ) == False
    # Square-free factor lists: (content, [(factor, multiplicity), ...]).
    assert dup_sqf_list([], ZZ) == (0, [])
    assert dup_sqf_list([1], ZZ) == (1, [])
    assert dup_sqf_list([1,0], ZZ) == (1, [([1,0], 1)])
    assert dup_sqf_list([2,0,0], ZZ) == (2, [([1,0], 2)])
    assert dup_sqf_list([3,0,0,0], ZZ) == (3, [([1,0], 3)])
    assert dup_sqf_list([ZZ(2),ZZ(4),ZZ(2)], ZZ) == \
        (ZZ(2), [([ZZ(1),ZZ(1)], 2)])
    assert dup_sqf_list([QQ(2),QQ(4),QQ(2)], QQ) == \
        (QQ(2), [([QQ(1),QQ(1)], 2)])
    assert dup_sqf_list([-1,1,0,0,1,-1], ZZ) == \
        (-1, [([1,1,1,1], 1), ([1,-1], 2)])
    assert dup_sqf_list([1,0,6,0,12,0,8,0,0], ZZ) == \
        (1, [([1,0], 2), ([1,0,2], 3)])
    # Finite-field coefficients (characteristic 2 and 3).
    K = FF(2)
    f = map(K, [1,0,1])  # Python 2: map returns a list here
    assert dup_sqf_list(f, K) == \
        (K(1), [([K(1),K(1)], 2)])
    K = FF(3)
    f = map(K, [1,0,0,2,0,0,2,0,0,1,0])
    assert dup_sqf_list(f, K) == \
        (K(1), [([K(1), K(0)], 1),
                ([K(1), K(1)], 3),
                ([K(1), K(2)], 6)])
    # The same poly can be square-free over ZZ but not over FF(3).
    f = [1,0,0,1]
    g = map(K, f)
    assert dup_sqf_part(f, ZZ) == f
    assert dup_sqf_part(g, K) == [K(1), K(1)]
    assert dup_sqf_p(f, ZZ) == True
    assert dup_sqf_p(g, K) == False
    # Resultant of a bivariate pair reduces to a power of a square-free part.
    A = [[1],[],[-3],[],[6]]
    D = [[1],[],[-5],[],[5],[],[4]]
    f, g = D, dmp_sub(A, dmp_mul(dmp_diff(D, 1, 1, ZZ), [[1,0]], 1, ZZ), 1, ZZ)
    res = dmp_resultant(f, g, 1, ZZ)
    assert dup_sqf_list(res, ZZ) == (45796, [([4,0,1], 3)])
    # Coefficients that are themselves polynomials (DMP over ZZ[x]).
    assert dup_sqf_list_include([DMP([1, 0, 0, 0], ZZ), DMP([], ZZ), DMP([], ZZ)], ZZ[x]) == \
        [([DMP([1, 0, 0, 0], ZZ)], 1), ([DMP([1], ZZ), DMP([], ZZ)], 2)]
def test_dmp_sqf():
    """Square-free routines over dense multivariate polynomials.

    NOTE: Python 2-era SymPy; ``raises`` takes the statement as a string.
    """
    assert dmp_sqf_part([[]], 1, ZZ) == [[]]
    assert dmp_sqf_p([[]], 1, ZZ) == True
    assert dmp_sqf_part([[7]], 1, ZZ) == [[1]]
    assert dmp_sqf_p([[7]], 1, ZZ) == True
    # The specialpolys fixtures f_0..f_4, f_6 are square-free; their squares
    # are not.  f_5 contains a repeated factor already.
    assert dmp_sqf_p(f_0, 2, ZZ) == True
    assert dmp_sqf_p(dmp_sqr(f_0, 2, ZZ), 2, ZZ) == False
    assert dmp_sqf_p(f_1, 2, ZZ) == True
    assert dmp_sqf_p(dmp_sqr(f_1, 2, ZZ), 2, ZZ) == False
    assert dmp_sqf_p(f_2, 2, ZZ) == True
    assert dmp_sqf_p(dmp_sqr(f_2, 2, ZZ), 2, ZZ) == False
    assert dmp_sqf_p(f_3, 2, ZZ) == True
    assert dmp_sqf_p(dmp_sqr(f_3, 2, ZZ), 2, ZZ) == False
    assert dmp_sqf_p(f_5, 2, ZZ) == False
    assert dmp_sqf_p(dmp_sqr(f_5, 2, ZZ), 2, ZZ) == False
    assert dmp_sqf_p(f_4, 2, ZZ) == True
    assert dmp_sqf_part(f_4, 2, ZZ) == dmp_neg(f_4, 2, ZZ)
    assert dmp_sqf_p(f_6, 3, ZZ) == True
    assert dmp_sqf_part(f_6, 3, ZZ) == f_6
    assert dmp_sqf_part(f_5, 2, ZZ) == [[[1]], [[1], [-1, 0]]]
    assert dup_sqf_list([], ZZ) == (ZZ(0), [])
    assert dup_sqf_list_include([], ZZ) == [([], 1)]
    assert dmp_sqf_list([[ZZ(3)]], 1, ZZ) == (ZZ(3), [])
    assert dmp_sqf_list_include([[ZZ(3)]], 1, ZZ) == [([[ZZ(3)]], 1)]
    # Univariate case expressed through the multivariate API (u=0).
    f = [-1,1,0,0,1,-1]
    assert dmp_sqf_list(f, 0, ZZ) == \
        (-1, [([1,1,1,1], 1), ([1,-1], 2)])
    assert dmp_sqf_list_include(f, 0, ZZ) == \
        [([-1,-1,-1,-1], 1), ([1,-1], 2)]
    f = [[-1],[1],[],[],[1],[-1]]
    assert dmp_sqf_list(f, 1, ZZ) == \
        (-1, [([[1],[1],[1],[1]], 1), ([[1],[-1]], 2)])
    assert dmp_sqf_list_include(f, 1, ZZ) == \
        [([[-1],[-1],[-1],[-1]], 1), ([[1],[-1]], 2)]
    # NOTE(review): K = FF(2) appears unused by the following assert (it runs
    # over ZZ); K is only used inside the raises() string below -- confirm.
    K = FF(2)
    f = [[-1], [2], [-1]]
    assert dmp_sqf_list_include(f, 1, ZZ) == \
        [([[-1]], 1), ([[1], [-1]], 2)]
    raises(DomainError, "dmp_sqf_list([[K(1), K(0), K(1)]], 1, K)")
def test_dup_gff_list():
    """Greatest factorial factorization of dense univariate polynomials."""
    f = [1, 2, -1, -2, 0, 0]
    assert dup_gff_list(f, ZZ) == [([1, 0], 1), ([1, 2], 4)]
    g = [1, -20, 166, -744, 1965, -3132, 2948, -1504, 320, 0]
    assert dup_gff_list(g, ZZ) == [([1, -5, 4], 1), ([1, -5, 4], 2), ([1, 0], 3)]
    # The zero polynomial has no GFF; old-style string form of raises().
    raises(ValueError, "dup_gff_list([], ZZ)")
| agpl-3.0 |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/zipfile.py | 45 | 57538 | """
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
# Public API of this module.
__all__ = ["BadZipFile", "BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED",
           "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
    """Raised when a file is not a valid ZIP archive or its records are corrupt."""
    pass
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile, the zipfile requires ZIP64 extensions
    and those extensions are disabled (allowZip64=False).
    """
error = BadZipfile = BadZipFile      # Pre-3.2 compatibility names

# Limits above which the ZIP64 extensions are required.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1

# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported

# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
    """Return True if *fp* contains a readable end-of-central-directory record."""
    try:
        found = _EndRecData(fp)
    except IOError:
        # Seeks/reads failed; treat the object as not a ZIP file.
        return False
    return bool(found)
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.

    The filename argument may be a file or file-like object too.
    """
    try:
        if hasattr(filename, "read"):
            # Already an open file-like object; probe it directly.
            return _check_zipfile(fp=filename)
        with open(filename, "rb") as fp:
            return _check_zipfile(fp)
    except IOError:
        # Unreadable path or broken stream: not a ZIP file.
        return False
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec.

    fpin:   seekable binary file object positioned anywhere.
    offset: negative offset (from EOF) of the regular end-of-archive record.
    endrec: the list produced by _EndRecData; returned unchanged when no
            valid ZIP64 locator/record is found.
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except IOError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec

    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        return endrec

    if diskno != 0 or disks != 1:
        raise BadZipFile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
        dircount, dircount2, dirsize, diroffset = \
        struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except IOError:
        # File smaller than a bare EOCD record cannot be a ZIP file.
        return None
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == b"\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec = list(endrec)

        # Append a blank comment and record start offset
        endrec.append(b"")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
        comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)

    # Unable to find a valid end of central directory structure
    return
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive."""

    # __slots__ keeps per-entry memory low; archives can contain many members.
    __slots__ = (
            'orig_filename',
            'filename',
            'date_time',
            'compress_type',
            'comment',
            'extra',
            'create_system',
            'create_version',
            'extract_version',
            'reserved',
            'flag_bits',
            'volume',
            'internal_attr',
            'external_attr',
            'header_offset',
            'CRC',
            'compress_size',
            'file_size',
            '_raw_time',
        )

    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.orig_filename = filename   # Original file name in archive

        # Terminate the file name at the first null byte.  Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")

        self.filename = filename        # Normalized file name
        self.date_time = date_time      # year, month, day, hour, min, sec
        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self.comment = b""              # Comment for each file
        self.extra = b""                # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0          # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3          # System which created ZIP archive
        self.create_version = 20        # Version which created ZIP archive
        self.extract_version = 20       # Version needed to extract archive
        self.reserved = 0               # Must be zero
        self.flag_bits = 0              # ZIP flag bits
        self.volume = 0                 # Volume number of file header
        self.internal_attr = 0          # Internal attributes
        self.external_attr = 0          # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset         Byte offset to the file header
        # CRC                   CRC-32 of the uncompressed file
        # compress_size         Size of the compressed file
        # file_size             Size of the uncompressed file

    def FileHeader(self):
        """Return the per-file (local) header as a bytes string."""
        dt = self.date_time
        # Pack date/time into the two 16-bit MS-DOS fields.
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size

        extra = self.extra

        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                    1, struct.calcsize(fmt)-4, file_size, compress_size)
            file_size = 0xffffffff
            compress_size = 0xffffffff
            self.extract_version = max(45, self.extract_version)
            # NOTE(review): create_version is clamped against extract_version
            # here (not create_version) -- matches historical CPython; confirm
            # intentional before changing.
            self.create_version = max(45, self.extract_version)

        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, flag_bits,
                 self.compress_type, dostime, dosdate, CRC,
                 compress_size, file_size,
                 len(filename), len(extra))
        return header + filename + extra

    def _encodeFilenameFlags(self):
        # Prefer plain ASCII; otherwise use UTF-8 and set the language
        # encoding flag bit (0x800) as required by the ZIP appnote.
        try:
            return self.filename.encode('ascii'), self.flag_bits
        except UnicodeEncodeError:
            return self.filename.encode('utf-8'), self.flag_bits | 0x800

    def _decodeExtra(self):
        # Try to decode the extra field.
        extra = self.extra
        unpack = struct.unpack
        while extra:
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError("Corrupt extra field %s"%(ln,))

                idx = 0

                # ZIP64 extension (large files and/or large archives)
                if self.file_size in (0xffffffffffffffff, 0xffffffff):
                    self.file_size = counts[idx]
                    idx += 1

                if self.compress_size == 0xFFFFFFFF:
                    self.compress_size = counts[idx]
                    idx += 1

                if self.header_offset == 0xffffffff:
                    # NOTE(review): 'old' is assigned but never used.
                    old = self.header_offset
                    self.header_offset = counts[idx]
                    idx+=1

            extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
       Is returned by ZipFile.open().
    """

    # Max size supported by decompressor.
    # NOTE(review): '-' binds tighter than '<<', so this is 1 << 30, not
    # 2**31 - 1 -- matches historical CPython; confirm before changing.
    MAX_N = 1 << 31 - 1

    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096

    # Search for universal newlines or line chunks.
    PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')

    def __init__(self, fileobj, mode, zipinfo, decrypter=None,
                 close_fileobj=False):
        self._fileobj = fileobj
        self._decrypter = decrypter
        self._close_fileobj = close_fileobj

        self._compress_type = zipinfo.compress_type
        self._compress_size = zipinfo.compress_size
        self._compress_left = zipinfo.compress_size

        if self._compress_type == ZIP_DEFLATED:
            # Negative wbits: raw deflate stream (no zlib header/trailer).
            self._decompressor = zlib.decompressobj(-15)
        self._unconsumed = b''

        self._readbuffer = b''
        self._offset = 0

        self._universal = 'U' in mode
        self.newlines = None

        # Adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information.
        if self._decrypter is not None:
            self._compress_left -= 12

        self.mode = mode
        self.name = zipinfo.filename

        if hasattr(zipinfo, 'CRC'):
            self._expected_crc = zipinfo.CRC
            self._running_crc = crc32(b'') & 0xffffffff
        else:
            self._expected_crc = None

    def readline(self, limit=-1):
        """Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.
        """

        if not self._universal and limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find(b'\n', self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset: i]
                self._offset = i
                return line

        if not self._universal:
            return io.BufferedIOBase.readline(self, limit)

        line = b''
        while limit < 0 or len(line) < limit:
            readahead = self.peek(2)
            if readahead == b'':
                return line

            #
            # Search for universal newlines or line chunks.
            #
            # The pattern returns either a line chunk or a newline, but not
            # both. Combined with peek(2), we are assured that the sequence
            # '\r\n' is always retrieved completely and never split into
            # separate newlines - '\r', '\n' due to coincidental readaheads.
            #
            match = self.PATTERN.search(readahead)
            newline = match.group('newline')
            if newline is not None:
                if self.newlines is None:
                    self.newlines = []
                if newline not in self.newlines:
                    self.newlines.append(newline)
                self._offset += len(newline)
                return line + b'\n'

            chunk = match.group('chunk')
            if limit >= 0:
                chunk = chunk[: limit - len(line)]

            self._offset += len(chunk)
            line += chunk

        return line

    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            self._offset -= len(chunk)

        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]

    def readable(self):
        return True

    def read(self, n=-1):
        """Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached..
        """
        buf = b''
        if n is None:
            n = -1
        while True:
            if n < 0:
                data = self.read1(n)
            elif n > len(buf):
                data = self.read1(n - len(buf))
            else:
                return buf
            if len(data) == 0:
                return buf
            buf += data

    def _update_crc(self, newdata, eof):
        # Update the CRC using the given data.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
        # Check the CRC if we're at the end of the file
        if eof and self._running_crc != self._expected_crc:
            raise BadZipFile("Bad CRC-32 for file %r" % self.name)

    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""

        # Simplify algorithm (branching) by transforming negative n to large n.
        # BUGFIX: test 'n is None' *before* 'n < 0' -- comparing None with an
        # int raises TypeError in Python 3, so the original order crashed on
        # read1(None).
        if n is None or n < 0:
            n = self.MAX_N

        # Bytes available in read buffer.
        len_readbuffer = len(self._readbuffer) - self._offset

        # Read from file.
        if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
            nbytes = n - len_readbuffer - len(self._unconsumed)
            nbytes = max(nbytes, self.MIN_READ_SIZE)
            nbytes = min(nbytes, self._compress_left)

            data = self._fileobj.read(nbytes)
            self._compress_left -= len(data)

            if data and self._decrypter is not None:
                data = bytes(map(self._decrypter, data))

            if self._compress_type == ZIP_STORED:
                self._update_crc(data, eof=(self._compress_left==0))
                self._readbuffer = self._readbuffer[self._offset:] + data
                self._offset = 0
            else:
                # Prepare deflated bytes for decompression.
                self._unconsumed += data

        # Handle unconsumed data.
        if (len(self._unconsumed) > 0 and n > len_readbuffer and
            self._compress_type == ZIP_DEFLATED):
            data = self._decompressor.decompress(
                self._unconsumed,
                max(n - len_readbuffer, self.MIN_READ_SIZE)
            )

            self._unconsumed = self._decompressor.unconsumed_tail
            eof = len(self._unconsumed) == 0 and self._compress_left == 0
            if eof:
                data += self._decompressor.flush()

            self._update_crc(data, eof=eof)
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0

        # Read from buffer.
        data = self._readbuffer[self._offset: self._offset + n]
        self._offset += len(data)
        return data

    def close(self):
        try:
            if self._close_fileobj:
                self._fileobj.close()
        finally:
            super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
else:
raise RuntimeError("That compression method is not supported")
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self.comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError('Mode must be "r", "w" or "a"')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipFile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
    """Return a list of file names in the archive."""
    return [member.filename for member in self.filelist]
def infolist(self):
    """Return a list of class ZipInfo instances for files in the
    archive."""
    # Note: this returns the live internal list, not a copy; callers
    # mutating it would affect the archive's bookkeeping.
    return self.filelist
def printdir(self, file=None):
    """Print a table of contents for the zip file."""
    header = "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
    print(header, file=file)
    for info in self.filelist:
        stamp = "%d-%02d-%02d %02d:%02d:%02d" % info.date_time[:6]
        row = "%-46s %s %12d" % (info.filename, stamp, info.file_size)
        print(row, file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
f = self.open(zinfo.filename, "r")
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
    """Return the instance of ZipInfo given 'name'."""
    info = self.NameToInfo.get(name)
    if info is not None:
        return info
    raise KeyError(
        'There is no item named %r in the archive' % name)
def setpassword(self, pwd):
    """Set default password for encrypted files."""
    if pwd and not isinstance(pwd, bytes):
        raise TypeError("pwd: expected bytes, got %s" % type(pwd))
    # Empty/None passwords are normalized to None.
    self.pwd = pwd or None
def read(self, name, pwd=None):
    """Return file bytes (as a string) for name."""
    with self.open(name, "r", pwd) as source:
        data = source.read()
    return data
def open(self, name, mode="r", pwd=None):
    """Return file-like object for 'name'.

    'name' may be a member name or a ZipInfo object.  'pwd' is the
    password for encrypted members; it overrides the default set via
    setpassword().  Raises BadZipFile when the local header does not
    match the central directory, RuntimeError on a closed archive,
    bad mode, or a missing/wrong password.
    """
    if mode not in ("r", "U", "rU"):
        raise RuntimeError('open() requires mode "r", "U", or "rU"')
    if pwd and not isinstance(pwd, bytes):
        raise TypeError("pwd: expected bytes, got %s" % type(pwd))
    if not self.fp:
        raise RuntimeError(
            "Attempt to read ZIP archive that was already closed")

    # Only open a new file for instances where we were not
    # given a file object in the constructor
    if self._filePassed:
        zef_file = self.fp
    else:
        zef_file = io.open(self.filename, 'rb')

    # Make sure we have an info object
    if isinstance(name, ZipInfo):
        # 'name' is already an info object
        zinfo = name
    else:
        # Get info object for name
        try:
            zinfo = self.getinfo(name)
        except KeyError:
            # Only close the handle we opened ourselves.
            if not self._filePassed:
                zef_file.close()
            raise
    zef_file.seek(zinfo.header_offset, 0)

    # Skip the file header:
    fheader = zef_file.read(sizeFileHeader)
    if fheader[0:4] != stringFileHeader:
        raise BadZipFile("Bad magic number for file header")

    fheader = struct.unpack(structFileHeader, fheader)
    fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
    if fheader[_FH_EXTRA_FIELD_LENGTH]:
        # Extra field is skipped, not parsed, for the local header.
        zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

    if zinfo.flag_bits & 0x800:
        # UTF-8 filename
        fname_str = fname.decode("utf-8")
    else:
        # Historical ZIP filename encoding.
        fname_str = fname.decode("cp437")

    # Cross-check local header name against the central directory.
    if fname_str != zinfo.orig_filename:
        if not self._filePassed:
            zef_file.close()
        raise BadZipFile(
            'File name in directory %r and header %r differ.'
            % (zinfo.orig_filename, fname))

    # check for encrypted flag & handle password
    is_encrypted = zinfo.flag_bits & 0x1
    zd = None
    if is_encrypted:
        if not pwd:
            pwd = self.pwd
        if not pwd:
            if not self._filePassed:
                zef_file.close()
            raise RuntimeError("File %s is encrypted, "
                               "password required for extraction" % name)

        zd = _ZipDecrypter(pwd)
        # The first 12 bytes in the cypher stream is an encryption header
        # used to strengthen the algorithm. The first 11 bytes are
        # completely random, while the 12th contains the MSB of the CRC,
        # or the MSB of the file time depending on the header type
        # and is used to check the correctness of the password.
        header = zef_file.read(12)
        h = list(map(zd, header[0:12]))
        if zinfo.flag_bits & 0x8:
            # compare against the file type from extended local headers
            check_byte = (zinfo._raw_time >> 8) & 0xff
        else:
            # compare against the CRC otherwise
            check_byte = (zinfo.CRC >> 24) & 0xff
        if h[11] != check_byte:
            if not self._filePassed:
                zef_file.close()
            raise RuntimeError("Bad password for file", name)

    # ZipExtFile owns the handle only when we opened it here.
    return ZipExtFile(zef_file, mode, zinfo, zd,
                      close_fileobj=not self._filePassed)
def extract(self, member, path=None, pwd=None):
    """Extract a member from the archive to the current working directory,
    using its full name. Its file information is extracted as accurately
    as possible. `member' may be a filename or a ZipInfo object. You can
    specify a different directory using `path'.
    """
    info = member if isinstance(member, ZipInfo) else self.getinfo(member)
    target_dir = os.getcwd() if path is None else path
    return self._extract_member(info, target_dir, pwd)
def extractall(self, path=None, members=None, pwd=None):
    """Extract all members from the archive to the current working
    directory. `path' specifies a different directory to extract to.
    `members' is optional and must be a subset of the list returned
    by namelist().
    """
    for member in (self.namelist() if members is None else members):
        self.extract(member, path, pwd)
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
source = self.open(member, pwd=pwd)
target = open(targetpath, "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
return targetpath
def _writecheck(self, zinfo):
    """Check for errors before writing a file to the archive.

    Raises RuntimeError for a bad mode, a closed archive, an
    unavailable or unsupported compression method, and LargeZipFile
    when ZIP64 would be required but is not allowed.  Duplicate names
    are only warned about (when debugging), not rejected.
    """
    if zinfo.filename in self.NameToInfo:
        if self.debug:      # Warning for duplicate names
            print("Duplicate name:", zinfo.filename)
    if self.mode not in ("w", "a"):
        raise RuntimeError('write() requires mode "w" or "a"')
    if not self.fp:
        raise RuntimeError(
            "Attempt to write ZIP archive that was already closed")
    if zinfo.compress_type == ZIP_DEFLATED and not zlib:
        raise RuntimeError(
            "Compression requires the (missing) zlib module")
    if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
        raise RuntimeError("That compression method is not supported")
    if zinfo.file_size > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Filesize would require ZIP64 extensions")
    if zinfo.header_offset > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile(
                "Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
    """Put the bytes from filename into the archive under the name
    arcname.

    'arcname' defaults to 'filename' with the drive and any leading
    path separators stripped.  Directories are stored as zero-length
    entries with a trailing '/'.  'compress_type' overrides the
    archive default compression for this member.
    """
    if not self.fp:
        raise RuntimeError(
            "Attempt to write to ZIP archive that was already closed")

    st = os.stat(filename)
    isdir = stat.S_ISDIR(st.st_mode)
    mtime = time.localtime(st.st_mtime)
    date_time = mtime[0:6]
    # Create ZipInfo instance to store file information
    if arcname is None:
        arcname = filename
    # Normalize to an archive-relative name: drop drive and leading
    # separators.
    arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
    while arcname[0] in (os.sep, os.altsep):
        arcname = arcname[1:]
    if isdir:
        arcname += '/'
    zinfo = ZipInfo(arcname, date_time)
    zinfo.external_attr = (st[0] & 0xFFFF) << 16      # Unix attributes
    if compress_type is None:
        zinfo.compress_type = self.compression
    else:
        zinfo.compress_type = compress_type

    zinfo.file_size = st.st_size
    zinfo.flag_bits = 0x00
    zinfo.header_offset = self.fp.tell()    # Start of header bytes

    self._writecheck(zinfo)
    self._didModify = True

    if isdir:
        # Directory entry: header only, no data.
        zinfo.file_size = 0
        zinfo.compress_size = 0
        zinfo.CRC = 0
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
        self.fp.write(zinfo.FileHeader())
        return

    with open(filename, "rb") as fp:
        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        zinfo.file_size = file_size = 0
        self.fp.write(zinfo.FileHeader())
        if zinfo.compress_type == ZIP_DEFLATED:
            # Negative wbits: raw deflate stream, no zlib header.
            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                    zlib.DEFLATED, -15)
        else:
            cmpr = None
        while 1:
            buf = fp.read(1024 * 8)
            if not buf:
                break
            file_size = file_size + len(buf)
            CRC = crc32(buf, CRC) & 0xffffffff
            if cmpr:
                buf = cmpr.compress(buf)
                compress_size = compress_size + len(buf)
            self.fp.write(buf)
    if cmpr:
        buf = cmpr.flush()
        compress_size = compress_size + len(buf)
        self.fp.write(buf)
        zinfo.compress_size = compress_size
    else:
        zinfo.compress_size = file_size
    zinfo.CRC = CRC
    zinfo.file_size = file_size
    # Seek backwards and write CRC and file sizes
    position = self.fp.tell()       # Preserve current position in file
    # +14 skips to the CRC field of the already-written local header.
    self.fp.seek(zinfo.header_offset + 14, 0)
    self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                              zinfo.file_size))
    self.fp.seek(position, 0)
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
    """Write a file into the archive.  The contents is 'data', which
    may be either a 'str' or a 'bytes' instance; if it is a 'str',
    it is encoded as UTF-8 first.
    'zinfo_or_arcname' is either a ZipInfo instance or
    the name of the file in the archive."""
    if isinstance(data, str):
        data = data.encode("utf-8")
    if not isinstance(zinfo_or_arcname, ZipInfo):
        zinfo = ZipInfo(filename=zinfo_or_arcname,
                        date_time=time.localtime(time.time())[:6])
        zinfo.compress_type = self.compression
        # rw------- permissions for synthesized entries.
        zinfo.external_attr = 0o600 << 16
    else:
        zinfo = zinfo_or_arcname

    if not self.fp:
        raise RuntimeError(
            "Attempt to write to ZIP archive that was already closed")

    zinfo.file_size = len(data)            # Uncompressed size
    # NOTE(review): header_offset is assigned here and again below;
    # nothing appears to write to self.fp in between, so this first
    # assignment looks redundant — confirm before removing.
    zinfo.header_offset = self.fp.tell()    # Start of header data
    if compress_type is not None:
        zinfo.compress_type = compress_type

    self._writecheck(zinfo)
    self._didModify = True
    zinfo.CRC = crc32(data) & 0xffffffff       # CRC-32 checksum
    if zinfo.compress_type == ZIP_DEFLATED:
        co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                              zlib.DEFLATED, -15)
        data = co.compress(data) + co.flush()
        zinfo.compress_size = len(data)    # Compressed size
    else:
        zinfo.compress_size = zinfo.file_size
    zinfo.header_offset = self.fp.tell()    # Start of header data
    self.fp.write(zinfo.FileHeader())
    self.fp.write(data)
    self.fp.flush()
    if zinfo.flag_bits & 0x08:
        # Write CRC and file sizes after the file data
        self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                                  zinfo.file_size))
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
    """Call the "close()" method in case the user forgot."""
    # close() is idempotent (returns early when self.fp is None),
    # so an explicit close followed by garbage collection is safe.
    self.close()
def close(self):
    """Close the file, and for mode "w" and "a" write the ending
    records (central directory, optional ZIP64 records, and the
    end-of-archive record).  Safe to call more than once."""
    if self.fp is None:
        return

    if self.mode in ("w", "a") and self._didModify:  # write ending records
        count = 0
        pos1 = self.fp.tell()
        for zinfo in self.filelist:         # write central directory
            count = count + 1
            dt = zinfo.date_time
            dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
            dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
            extra = []
            if zinfo.file_size > ZIP64_LIMIT \
               or zinfo.compress_size > ZIP64_LIMIT:
                extra.append(zinfo.file_size)
                extra.append(zinfo.compress_size)
                file_size = 0xffffffff
                compress_size = 0xffffffff
            else:
                file_size = zinfo.file_size
                compress_size = zinfo.compress_size

            if zinfo.header_offset > ZIP64_LIMIT:
                extra.append(zinfo.header_offset)
                header_offset = 0xffffffff
            else:
                header_offset = zinfo.header_offset

            extra_data = zinfo.extra
            if extra:
                # Append a ZIP64 field to the extra's
                extra_data = struct.pack(
                    '<HH' + 'Q'*len(extra),
                    1, 8*len(extra), *extra) + extra_data

                # ZIP64 requires at least version 4.5.
                extract_version = max(45, zinfo.extract_version)
                create_version = max(45, zinfo.create_version)
            else:
                extract_version = zinfo.extract_version
                create_version = zinfo.create_version

            try:
                filename, flag_bits = zinfo._encodeFilenameFlags()
                centdir = struct.pack(structCentralDir,
                    stringCentralDir, create_version,
                    zinfo.create_system, extract_version, zinfo.reserved,
                    flag_bits, zinfo.compress_type, dostime, dosdate,
                    zinfo.CRC, compress_size, file_size,
                    len(filename), len(extra_data), len(zinfo.comment),
                    0, zinfo.internal_attr, zinfo.external_attr,
                    header_offset)
            except DeprecationWarning:
                print((structCentralDir, stringCentralDir, create_version,
                       zinfo.create_system, extract_version, zinfo.reserved,
                       zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                       zinfo.CRC, compress_size, file_size,
                       len(zinfo.filename), len(extra_data), len(zinfo.comment),
                       0, zinfo.internal_attr, zinfo.external_attr,
                       header_offset), file=sys.stderr)
                raise
            self.fp.write(centdir)
            self.fp.write(filename)
            self.fp.write(extra_data)
            self.fp.write(zinfo.comment)

        pos2 = self.fp.tell()
        # Write end-of-zip-archive record
        centDirCount = count
        centDirSize = pos2 - pos1
        centDirOffset = pos1
        if (centDirCount >= ZIP_FILECOUNT_LIMIT or
            centDirOffset > ZIP64_LIMIT or
            centDirSize > ZIP64_LIMIT):
            # Need to write the ZIP64 end-of-archive records
            zip64endrec = struct.pack(
                structEndArchive64, stringEndArchive64,
                44, 45, 45, 0, 0, centDirCount, centDirCount,
                centDirSize, centDirOffset)
            self.fp.write(zip64endrec)

            zip64locrec = struct.pack(
                structEndArchive64Locator,
                stringEndArchive64Locator, 0, pos2, 1)
            self.fp.write(zip64locrec)
            # Clamp the legacy record's fields to their maxima; real
            # values live in the ZIP64 records written above.
            centDirCount = min(centDirCount, 0xFFFF)
            centDirSize = min(centDirSize, 0xFFFFFFFF)
            centDirOffset = min(centDirOffset, 0xFFFFFFFF)

        # check for valid comment length
        if len(self.comment) >= ZIP_MAX_COMMENT:
            if self.debug > 0:
                # Fix: the warning message used to be assigned to a
                # local and never emitted; print it like the other
                # debug diagnostics in this class.
                print('Archive comment is too long; truncating to %d bytes'
                      % ZIP_MAX_COMMENT)
            self.comment = self.comment[:ZIP_MAX_COMMENT]

        endrec = struct.pack(structEndArchive, stringEndArchive,
                             0, 0, centDirCount, centDirCount,
                             centDirSize, centDirOffset, len(self.comment))
        self.fp.write(endrec)
        self.fp.write(self.comment)
        self.fp.flush()

    # Only close the handle we own; a caller-supplied file object is
    # left open for the caller to manage.
    if not self._filePassed:
        self.fp.close()
    self.fp = None
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def __init__(self, file, mode="r", compression=ZIP_STORED,
                 allowZip64=False, optimize=-1):
        ZipFile.__init__(self, file, mode=mode, compression=compression,
                         allowZip64=allowZip64)
        # -1 means "legacy mode": pick up whatever compiled file exists.
        self._optimize = optimize

    def writepy(self, pathname, basename=""):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive.  If pathname is a plain
        directory, listdir *.py and enter all modules.  Else, pathname
        must be a Python *.py file and the module will be put into the
        archive.  Added modules are always module.pyo or module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        """
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print("Adding package in", pathname, "as", basename)
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print("Adding", arcname)
                self.write(fname, arcname)
                dirlist = os.listdir(pathname)
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename)  # Recursive call
                    elif ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print("Adding files from directory", pathname)
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
        else:
            if pathname[-3:] != ".py":
                raise RuntimeError(
                    'Files added with writepy() must end with ".py"')
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print("Adding file", arcname)
            self.write(fname, arcname)

    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary.  For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        def _compile(file, optimize=-1):
            import py_compile
            if self.debug:
                print("Compiling", file)
            try:
                py_compile.compile(file, doraise=True, optimize=optimize)
            except py_compile.PyCompileError as error:
                # Bug fix: the caught exception is bound to 'error';
                # the old code printed the undefined name 'err' and
                # would have raised NameError on any compile failure.
                print(error.msg)
                return False
            return True

        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        pycache_pyc = imp.cache_from_source(file_py, True)
        pycache_pyo = imp.cache_from_source(file_py, False)
        if self._optimize == -1:
            # legacy mode: use whatever file is present
            if (os.path.isfile(file_pyo) and
                os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
                # Use .pyo file.
                arcname = fname = file_pyo
            elif (os.path.isfile(file_pyc) and
                  os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
                # Use .pyc file.
                arcname = fname = file_pyc
            elif (os.path.isfile(pycache_pyc) and
                  os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_pyc
                arcname = file_pyc
            elif (os.path.isfile(pycache_pyo) and
                  os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyo file, but write it to the legacy pyo
                # file name in the archive.
                fname = pycache_pyo
                arcname = file_pyo
            else:
                # Compile py into PEP 3147 pyc file.
                if _compile(file_py):
                    fname = (pycache_pyc if __debug__ else pycache_pyo)
                    arcname = (file_pyc if __debug__ else file_pyo)
                else:
                    # Compilation failed: fall back to shipping the source.
                    fname = arcname = file_py
        else:
            # new mode: use given optimization level
            if self._optimize == 0:
                fname = pycache_pyc
                arcname = file_pyc
            else:
                fname = pycache_pyo
                arcname = file_pyo
            if not (os.path.isfile(fname) and
                    os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
                if not _compile(file_py, optimize=self._optimize):
                    fname = arcname = file_py
        archivename = os.path.split(arcname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
def main(args=None):
    """Command-line entry point: list (-l), test (-t), extract (-e)
    or create (-c) a zip archive.  'args' defaults to sys.argv[1:]."""
    import textwrap
    USAGE = textwrap.dedent("""\
        Usage:
            zipfile.py -l zipfile.zip        # Show listing of a zipfile
            zipfile.py -t zipfile.zip        # Test if a zipfile is valid
            zipfile.py -e zipfile.zip target # Extract zipfile into target dir
            zipfile.py -c zipfile.zip src ... # Create zipfile from sources
        """)
    if args is None:
        args = sys.argv[1:]

    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
        print(USAGE)
        sys.exit(1)

    if args[0] == '-l':
        if len(args) != 2:
            print(USAGE)
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        zf.printdir()
        zf.close()

    elif args[0] == '-t':
        if len(args) != 2:
            print(USAGE)
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        badfile = zf.testzip()
        # Fix: the archive handle was previously leaked in this branch.
        zf.close()
        if badfile:
            print("The following enclosed file is corrupted: {!r}".format(badfile))
        print("Done testing")

    elif args[0] == '-e':
        if len(args) != 3:
            print(USAGE)
            sys.exit(1)

        zf = ZipFile(args[1], 'r')
        out = args[2]
        for path in zf.namelist():
            # Normalize './'-prefixed member names before joining.
            if path.startswith('./'):
                tgt = os.path.join(out, path[2:])
            else:
                tgt = os.path.join(out, path)

            tgtdir = os.path.dirname(tgt)
            if not os.path.exists(tgtdir):
                os.makedirs(tgtdir)
            with open(tgt, 'wb') as fp:
                fp.write(zf.read(path))
        zf.close()

    elif args[0] == '-c':
        if len(args) < 3:
            print(USAGE)
            sys.exit(1)

        def addToZip(zf, path, zippath):
            # Recursively add files; non-file, non-dir entries ignored.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                for nm in os.listdir(path):
                    addToZip(zf,
                             os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore

        zf = ZipFile(args[1], 'w', allowZip64=True)
        for src in args[2:]:
            addToZip(zf, src, os.path.basename(src))
        zf.close()
# Allow the module to be run as a command-line tool.
if __name__ == "__main__":
    main()
| apache-2.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/gunicorn/arbiter.py | 27 | 17557 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import print_function
import errno
import os
import random
import select
import signal
import sys
import time
import traceback
from gunicorn.errors import HaltServer, AppImportError
from gunicorn.pidfile import Pidfile
from gunicorn.sock import create_sockets
from gunicorn import util
from gunicorn import __version__, SERVER_SOFTWARE
class Arbiter(object):
    """
    Arbiter maintain the workers processes alive. It launches or
    kills them if needed. It also manages application reloading
    via SIGHUP/USR2.
    """

    # A flag indicating if a worker failed to
    # to boot. If a worker process exist with
    # this error code, the arbiter will terminate.
    WORKER_BOOT_ERROR = 3

    # A flag indicating if an application failed to be loaded
    APP_LOAD_ERROR = 4

    # Command-line/start-up context captured for re-exec (USR2).
    START_CTX = {}

    # NOTE(review): these are class-level mutable attributes shared by
    # all instances until an instance rebinds them (as __init__/setup
    # do for some) — presumably intentional, since only one arbiter
    # runs per master process.
    LISTENERS = []
    WORKERS = {}
    PIPE = []

    # I love dynamic languages
    SIG_QUEUE = []
    SIGNALS = [getattr(signal, "SIG%s" % x)
               for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
    # Map signal number -> lowercase name ("hup", "term", ...) used to
    # dispatch to the handle_<name> methods.
    SIG_NAMES = dict(
        (getattr(signal, name), name[3:].lower()) for name in dir(signal)
        if name[:3] == "SIG" and name[3] != "_"
    )
def __init__(self, app):
    """Initialize the arbiter for the given application object and
    record the start context (argv, cwd, executable) used by reexec()."""
    os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE

    self._num_workers = None
    self.setup(app)

    self.pidfile = None
    self.worker_age = 0
    self.reexec_pid = 0
    self.master_name = "Master"

    cwd = util.getcwd()

    args = sys.argv[:]
    args.insert(0, sys.executable)

    # init start context
    self.START_CTX = {
        "args": args,
        "cwd": cwd,
        0: sys.executable
    }
def _get_num_workers(self):
    return self._num_workers

def _set_num_workers(self, value):
    old_value = self._num_workers
    self._num_workers = value
    # Let user configuration hooks react to worker-count changes.
    self.cfg.nworkers_changed(self, value, old_value)
# num_workers is a property so that every assignment triggers the
# nworkers_changed hook above.
num_workers = property(_get_num_workers, _set_num_workers)
def setup(self, app):
    """Load configuration-derived state from the app; called on
    __init__ and again on reload()."""
    self.app = app
    self.cfg = app.cfg
    self.log = self.cfg.logger_class(app.cfg)

    # reopen files
    # GUNICORN_FD in the environment indicates we were re-exec'ed.
    if 'GUNICORN_FD' in os.environ:
        self.log.reopen_files()

    self.worker_class = self.cfg.worker_class
    self.address = self.cfg.address
    self.num_workers = self.cfg.workers
    self.timeout = self.cfg.timeout
    self.proc_name = self.cfg.proc_name

    self.log.debug('Current configuration:\n{0}'.format(
        '\n'.join(
            '  {0}: {1}'.format(config, value.value)
            for config, value
            in sorted(self.cfg.settings.items(),
                      key=lambda setting: setting[1]))))

    # set enviroment' variables
    if self.cfg.env:
        for k, v in self.cfg.env.items():
            os.environ[k] = v

    if self.cfg.preload_app:
        # Import the WSGI app once in the master so workers share it.
        self.app.wsgi()
def start(self):
    """\
    Initialize the arbiter. Start listening and set pidfile if needed.
    """
    self.log.info("Starting gunicorn %s", __version__)
    self.pid = os.getpid()
    if self.cfg.pidfile is not None:
        self.pidfile = Pidfile(self.cfg.pidfile)
        self.pidfile.create(self.pid)
    self.cfg.on_starting(self)
    self.init_signals()
    # LISTENERS may already be populated after a re-exec.
    if not self.LISTENERS:
        self.LISTENERS = create_sockets(self.cfg, self.log)

    listeners_str = ",".join([str(l) for l in self.LISTENERS])
    self.log.debug("Arbiter booted")
    self.log.info("Listening at: %s (%s)", listeners_str, self.pid)
    self.log.info("Using worker: %s", self.cfg.worker_class_str)

    # check worker class requirements
    if hasattr(self.worker_class, "check_config"):
        self.worker_class.check_config(self.cfg, self.log)

    self.cfg.when_ready(self)
def init_signals(self):
    """\
    Initialize master signal handling. Most of the signals
    are queued. Child signals only wake up the master.
    """
    # close old PIPE (idiom fix: plain loops instead of side-effect
    # list comprehensions)
    for p in self.PIPE:
        os.close(p)

    # initialize the pipe used to wake the master out of select()
    self.PIPE = pair = os.pipe()
    for p in pair:
        util.set_non_blocking(p)
        util.close_on_exec(p)

    self.log.close_on_exec()

    # initialize all signals: most are queued by self.signal(); SIGCHLD
    # gets its own immediate handler to reap children.
    for s in self.SIGNALS:
        signal.signal(s, self.signal)
    signal.signal(signal.SIGCHLD, self.handle_chld)
def signal(self, sig, frame):
    """Generic handler: queue the signal (bounded backlog of 5) and
    wake the master loop so it gets processed."""
    if len(self.SIG_QUEUE) >= 5:
        return
    self.SIG_QUEUE.append(sig)
    self.wakeup()
def run(self):
    "Main master loop."
    self.start()
    util._setproctitle("master [%s]" % self.proc_name)
    self.manage_workers()
    while True:
        try:
            # Pop one queued signal per iteration; when idle, sleep on
            # the wakeup pipe and do housekeeping.
            sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
            if sig is None:
                self.sleep()
                self.murder_workers()
                self.manage_workers()
                continue

            if sig not in self.SIG_NAMES:
                self.log.info("Ignoring unknown signal: %s", sig)
                continue

            # Dispatch to handle_<signame>() by naming convention.
            signame = self.SIG_NAMES.get(sig)
            handler = getattr(self, "handle_%s" % signame, None)
            if not handler:
                self.log.error("Unhandled signal: %s", signame)
                continue
            self.log.info("Handling signal: %s", signame)
            handler()
            self.wakeup()
        # StopIteration is raised by handle_term/int/quit to request
        # a clean shutdown.
        except StopIteration:
            self.halt()
        except KeyboardInterrupt:
            self.halt()
        except HaltServer as inst:
            self.halt(reason=inst.reason, exit_status=inst.exit_status)
        except SystemExit:
            raise
        except Exception:
            self.log.info("Unhandled exception in main loop:\n%s",
                          traceback.format_exc())
            self.stop(False)
            if self.pidfile is not None:
                self.pidfile.unlink()
            sys.exit(-1)
def handle_chld(self, sig, frame):
    "SIGCHLD handling"
    # Reap exited children immediately, then wake the main loop.
    self.reap_workers()
    self.wakeup()
def handle_hup(self):
    """\
    HUP handling.
    - Reload configuration
    - Start the new worker processes with a new configuration
    - Gracefully shutdown the old worker processes
    """
    self.log.info("Hang up: %s", self.master_name)
    self.reload()
def handle_term(self):
    "SIGTERM handling"
    # StopIteration is caught in run(), which then calls halt() —
    # workers get a graceful stop via halt() -> stop().
    raise StopIteration
def handle_int(self):
    "SIGINT handling"
    # Quick (non-graceful) worker stop, then shut the master down.
    self.stop(False)
    raise StopIteration
def handle_quit(self):
    "SIGQUIT handling"
    # Quick (non-graceful) worker stop, then shut the master down.
    self.stop(False)
    raise StopIteration
def handle_ttin(self):
    """\
    SIGTTIN handling.
    Increases the number of workers by one.
    """
    self.num_workers += 1
    self.manage_workers()
def handle_ttou(self):
    """\
    SIGTTOU handling.
    Decreases the number of workers by one.
    """
    # Never scale below a single worker.
    if self.num_workers > 1:
        self.num_workers -= 1
        self.manage_workers()
def handle_usr1(self):
    """\
    SIGUSR1 handling.
    Kill all workers by sending them a SIGUSR1
    """
    # Reopen master log files first, then tell workers to do the same.
    self.log.reopen_files()
    self.kill_workers(signal.SIGUSR1)
def handle_usr2(self):
    """\
    SIGUSR2 handling.
    Creates a new master/worker set as a slave of the current
    master without affecting old workers. Use this to do live
    deployment with the ability to backout a change.
    """
    self.reexec()
def handle_winch(self):
    "SIGWINCH handling"
    if not self.cfg.daemon:
        # In the foreground SIGWINCH is just a terminal resize.
        self.log.debug("SIGWINCH ignored. Not daemonized")
        return
    # Daemonized: gracefully stop all workers without exiting.
    self.log.info("graceful stop of workers")
    self.num_workers = 0
    self.kill_workers(signal.SIGTERM)
def wakeup(self):
    """\
    Wake up the arbiter by writing to the PIPE
    """
    try:
        os.write(self.PIPE[1], b'.')
    except IOError as e:
        # The pipe is non-blocking: EAGAIN just means it's already
        # full (a wakeup is pending), EINTR means interrupted — both
        # are safe to ignore.
        if e.errno not in [errno.EAGAIN, errno.EINTR]:
            raise
def halt(self, reason=None, exit_status=0):
    """ halt arbiter: stop workers, clean up the pidfile, run the
    on_exit hook and exit the master process with exit_status. """
    self.stop()
    self.log.info("Shutting down: %s", self.master_name)
    if reason is not None:
        self.log.info("Reason: %s", reason)
    if self.pidfile is not None:
        self.pidfile.unlink()
    self.cfg.on_exit(self)
    sys.exit(exit_status)
def sleep(self):
    """\
    Sleep until PIPE is readable or we timeout.
    A readable PIPE means a signal occurred.
    """
    try:
        # 1-second timeout bounds the housekeeping interval in run().
        ready = select.select([self.PIPE[0]], [], [], 1.0)
        if not ready[0]:
            return
        # Drain everything written by wakeup() so the pipe does not
        # stay permanently readable.
        while os.read(self.PIPE[0], 1):
            pass
    except select.error as e:
        if e.args[0] not in [errno.EAGAIN, errno.EINTR]:
            raise
    except OSError as e:
        if e.errno not in [errno.EAGAIN, errno.EINTR]:
            raise
    except KeyboardInterrupt:
        sys.exit()
def stop(self, graceful=True):
    """\
    Stop workers

    :attr graceful: boolean, If True (the default) workers will be
    killed gracefully  (ie. trying to wait for the current connection)
    """
    self.LISTENERS = []
    # SIGTERM asks workers to finish their current request; SIGQUIT
    # stops them immediately.
    sig = signal.SIGTERM if graceful else signal.SIGQUIT
    limit = time.time() + self.cfg.graceful_timeout

    # instruct the workers to exit
    self.kill_workers(sig)

    # wait until the graceful timeout, then force-kill stragglers
    while self.WORKERS and time.time() < limit:
        time.sleep(0.1)
    self.kill_workers(signal.SIGKILL)
def reexec(self):
    """\
    Relaunch the master and workers.
    """
    if self.pidfile is not None:
        # Keep the old pidfile around under a .oldbin suffix so the
        # old master can be contacted/backed out.
        self.pidfile.rename("%s.oldbin" % self.pidfile.fname)

    self.reexec_pid = os.fork()
    if self.reexec_pid != 0:
        # Parent: keep running as "Old Master" alongside the new one.
        self.master_name = "Old Master"
        return

    environ = self.cfg.env_orig.copy()
    # Pass the listening sockets to the new master via GUNICORN_FD so
    # no connections are dropped during the handover.
    fds = [l.fileno() for l in self.LISTENERS]
    environ['GUNICORN_FD'] = ",".join([str(fd) for fd in fds])

    os.chdir(self.START_CTX['cwd'])
    self.cfg.pre_exec(self)

    # exec the process using the original environnement
    os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ)
def reload(self):
    """Reload configuration (SIGHUP): reset the environment, re-read
    the app config, recreate listeners if the bind address changed,
    recreate the pidfile and spawn a fresh set of workers."""
    old_address = self.cfg.address

    # reset old environement
    for k in self.cfg.env:
        if k in self.cfg.env_orig:
            # reset the key to the value it had before
            # we launched gunicorn
            os.environ[k] = self.cfg.env_orig[k]
        else:
            # delete the value set by gunicorn
            try:
                del os.environ[k]
            except KeyError:
                pass

    # reload conf
    self.app.reload()
    self.setup(self.app)

    # reopen log files
    self.log.reopen_files()

    # do we need to change listener ?
    if old_address != self.cfg.address:
        # close all listeners (idiom fix: loop, not a side-effect
        # list comprehension)
        for l in self.LISTENERS:
            l.close()
        # init new listeners
        self.LISTENERS = create_sockets(self.cfg, self.log)
        # Bug fix: the old code did ",".join(str(self.LISTENERS)),
        # which joins the characters of a single repr string; join
        # the per-listener strings instead (same form as start()).
        listeners_str = ",".join([str(l) for l in self.LISTENERS])
        self.log.info("Listening at: %s", listeners_str)

    # do some actions on reload
    self.cfg.on_reload(self)

    # unlink pidfile
    if self.pidfile is not None:
        self.pidfile.unlink()

    # create new pidfile
    if self.cfg.pidfile is not None:
        self.pidfile = Pidfile(self.cfg.pidfile)
        self.pidfile.create(self.pid)

    # set new proc_name
    util._setproctitle("master [%s]" % self.proc_name)

    # spawn new workers
    for i in range(self.cfg.workers):
        self.spawn_worker()

    # manage workers
    self.manage_workers()
def murder_workers(self):
    """\
    Kill unused/idle workers
    """
    if not self.timeout:
        return
    # Snapshot the items: kill_worker may mutate self.WORKERS.
    workers = list(self.WORKERS.items())
    for (pid, worker) in workers:
        try:
            # Workers touch their tmp file as a heartbeat; a stale
            # timestamp means the worker is stuck.
            if time.time() - worker.tmp.last_update() <= self.timeout:
                continue
        except ValueError:
            continue

        if not worker.aborted:
            self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
            worker.aborted = True
            # First try SIGABRT (lets the worker dump/abort cleanly)…
            self.kill_worker(pid, signal.SIGABRT)
        else:
            # …then SIGKILL if it is still around next tick.
            self.kill_worker(pid, signal.SIGKILL)
def reap_workers(self):
    """\
    Reap workers to avoid zombie processes
    """
    try:
        while True:
            wpid, status = os.waitpid(-1, os.WNOHANG)
            if not wpid:
                break
            if self.reexec_pid == wpid:
                # The re-exec'ed master exited, not a worker.
                self.reexec_pid = 0
            else:
                # A worker said it cannot boot. We'll shutdown
                # to avoid infinite start/stop cycles.
                exitcode = status >> 8
                if exitcode == self.WORKER_BOOT_ERROR:
                    reason = "Worker failed to boot."
                    raise HaltServer(reason, self.WORKER_BOOT_ERROR)
                if exitcode == self.APP_LOAD_ERROR:
                    reason = "App failed to load."
                    raise HaltServer(reason, self.APP_LOAD_ERROR)
                worker = self.WORKERS.pop(wpid, None)
                if not worker:
                    continue
                worker.tmp.close()
    except OSError as e:
        # ECHILD just means there are no children left to reap.
        if e.errno != errno.ECHILD:
            raise
def manage_workers(self):
    """\
    Maintain the number of workers by spawning or killing
    as required.
    """
    if len(self.WORKERS.keys()) < self.num_workers:
        self.spawn_workers()

    workers = self.WORKERS.items()
    # Oldest workers (lowest age) are retired first when scaling down.
    workers = sorted(workers, key=lambda w: w[1].age)
    while len(workers) > self.num_workers:
        (pid, _) = workers.pop(0)
        self.kill_worker(pid, signal.SIGTERM)

    self.log.debug("{0} workers".format(len(workers)),
                   extra={"metric": "gunicorn.workers",
                          "value": len(workers),
                          "mtype": "gauge"})
def spawn_worker(self):
    """Fork one worker process.

    In the master, registers the child in WORKERS and returns its pid.
    In the child, runs the worker's init_process() and never returns
    (always exits via sys.exit)."""
    self.worker_age += 1
    worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
                               self.app, self.timeout / 2.0,
                               self.cfg, self.log)
    self.cfg.pre_fork(self, worker)
    pid = os.fork()
    if pid != 0:
        self.WORKERS[pid] = worker
        return pid

    # Process Child
    worker_pid = os.getpid()
    try:
        util._setproctitle("worker [%s]" % self.proc_name)
        self.log.info("Booting worker with pid: %s", worker_pid)
        self.cfg.post_fork(self, worker)
        worker.init_process()
        sys.exit(0)
    except SystemExit:
        raise
    except AppImportError as e:
        # Exit code APP_LOAD_ERROR makes the master halt (see
        # reap_workers) instead of respawning forever.
        self.log.debug("Exception while loading the application: \n%s",
                       traceback.format_exc())
        print("%s" % e, file=sys.stderr)
        sys.stderr.flush()
        sys.exit(self.APP_LOAD_ERROR)
    except:
        self.log.exception("Exception in worker process:\n%s",
                           traceback.format_exc())
        if not worker.booted:
            sys.exit(self.WORKER_BOOT_ERROR)
        sys.exit(-1)
    finally:
        self.log.info("Worker exiting (pid: %s)", worker_pid)
        try:
            worker.tmp.close()
            self.cfg.worker_exit(self, worker)
        except:
            # Best-effort cleanup while the child is already exiting.
            pass
def spawn_workers(self):
"""\
Spawn new workers as needed.
This is where a worker process leaves the main loop
of the master process.
"""
for i in range(self.num_workers - len(self.WORKERS.keys())):
self.spawn_worker()
time.sleep(0.1 * random.random())
def kill_workers(self, sig):
"""\
Kill all workers with the signal `sig`
:attr sig: `signal.SIG*` value
"""
worker_pids = list(self.WORKERS.keys())
for pid in worker_pids:
self.kill_worker(pid, sig)
def kill_worker(self, pid, sig):
"""\
Kill a worker
:attr pid: int, worker pid
:attr sig: `signal.SIG*` value
"""
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
try:
worker = self.WORKERS.pop(pid)
worker.tmp.close()
self.cfg.worker_exit(self, worker)
return
except (KeyError, OSError):
return
raise
| gpl-2.0 |
hinerm/ITK | Wrapping/Generators/SwigInterface/idx.py | 13 | 1236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NOTE: this is Python 2 code (cStringIO, `print >>`, the `file()` builtin).
# It reads a gccxml output file (argv[1]) and emits one "{type} {name}
# {module}" line per typedef for the ITK SWIG wrapping machinery,
# writing to argv[2] or to stdout when argv[2] is '-'.

import sys, os
# Use the pygccxml copy bundled next to this script.
sys.path.append(sys.path[0]+os.sep+'pygccxml-1.0.0')
import pygccxml, sys, cStringIO

# the output file (collected in memory, flushed at the end)
outputFile = cStringIO.StringIO()

# init the pygccxml stuff
pygccxml.declarations.scopedef_t.RECURSIVE_DEFAULT = False
pygccxml.declarations.scopedef_t.ALLOW_EMPTY_MDECL_WRAPPER = True
pygccxml_config = pygccxml.parser.config.config_t()
pygccxml_reader = pygccxml.parser.source_reader.source_reader_t(pygccxml_config)

# and read a xml file (first command-line argument)
res = pygccxml_reader.read_xml_file(sys.argv[1])

global_ns = pygccxml.declarations.get_global_namespace( res )
cable_ns = global_ns.namespace('_cable_')
wrappers_ns = cable_ns.namespace('wrappers')

# module name is derived from the input file's base name
module = os.path.splitext(os.path.basename(sys.argv[1]))[0]

# iterate over all the typedefs in the _cable_::wrappers namespace
for typedef in wrappers_ns.typedefs():
    n = typedef.name
    s = typedef.type.decl_string
    # drop the :: prefix - it makes swig produce invalid code
    if s.startswith("::"):
        s = s[2:]
    print >> outputFile, "{%s} {%s} {%s}" % (s, n, module)

content = outputFile.getvalue()

# second argument is the destination file, or '-' for stdout
if sys.argv[2] != '-':
    f = file( sys.argv[2], "w" )
    f.write( content )
    f.close()
else:
    sys.stdout.write( content )
| apache-2.0 |
class Album():
    """A music album: title, lead song, genre, artist and year."""

    def __init__(self, titulo, cancion, genero, artista, año):
        self.titulo = titulo
        self.cancion = cancion
        self.genero = genero
        self.artista = artista
        self.año = año

    def get_titulo(self):
        # Full human-readable summary of the album.
        return "Informacion: \nTitulo: {} \ncancion: {} \ngenero: {} \nartista: {}\naño: {}\n".format(self.titulo, self.cancion, self.genero, self.artista, self.año)

    def get_cancion(self):
        # Short summary of the lead song only.
        return "resumen: \ncancion: {}, \ngenero: {}, \nartista: {} \n".format(self.cancion, self.genero, self.artista)


class Cancion(Album):
    """A single track: extends Album with track name, number and length."""

    def __init__(self, titulo, cancion, genero, artista, año, Nombre, pista, duracion):
        Album.__init__(self, titulo, cancion, genero, artista, año)
        self.Nombre = Nombre
        self.pista = pista
        self.duracion = duracion

    def get_pista(self):
        # Bug fix: the original format string had no placeholder for
        # ``duracion``, so the third argument was silently dropped by
        # str.format and the track length never appeared in the output.
        return "Informacion de la cancion: \nNombre: {}, \npista: {}, \nduracion: {} \n".format(self.Nombre, self.pista, self.duracion)

    def get_Nombre(self, Nombre):
        # Despite the "get_" prefix this is a setter; the name is kept
        # unchanged so existing callers keep working.
        self.Nombre = Nombre
# Demo usage: build an album and one of its tracks and print summaries.
Alter_Bridge = Album('One Day Remains', 'Open Your Eyes', 'Rock', 'Alter Bridge', '2004')
print(Alter_Bridge.get_titulo())
print(Alter_Bridge.get_cancion())

# Renamed from ``open`` so the builtin open() is no longer shadowed.
open_your_eyes = Cancion('One Day Remains', 'Open Your Eyes', 'Rock', 'Alter Bridge', '2004', 'Metalingus', '5', '4.19 min')
print(open_your_eyes.get_pista())
devs1991/test_edx_docmode | openedx/core/djangoapps/profile_images/views.py | 29 | 8490 | """
This module implements the upload and remove endpoints of the profile image api.
"""
from contextlib import closing
import datetime
import itertools
import logging
from django.utils.translation import ugettext as _
from django.utils.timezone import utc
from rest_framework import permissions, status
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.user_api.errors import UserNotFound
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
from openedx.core.lib.api.parsers import TypedFileUploadParser
from openedx.core.lib.api.permissions import IsUserInUrl
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image
from .exceptions import ImageValidationError
from .images import (
IMAGE_TYPES, validate_uploaded_image, create_profile_images, remove_profile_images
)
log = logging.getLogger(__name__)
LOG_MESSAGE_CREATE = 'Generated and uploaded images %(image_names)s for user %(user_id)s'
LOG_MESSAGE_DELETE = 'Deleted images %(image_names)s for user %(user_id)s'
def _make_upload_dt():
    """
    Build the timezone-aware (UTC) timestamp recorded for an upload.

    Kept as a standalone helper so tests can monkey-patch it.
    """
    naive_now = datetime.datetime.utcnow()
    return naive_now.replace(tzinfo=utc)
class ProfileImageView(DeveloperErrorViewMixin, APIView):
    """
    **Use Cases**

        Add or remove profile images associated with user accounts.

        The requesting user must be signed in.  Users can only add profile
        images to their own account.  Users with staff access can remove
        profile images for other user accounts.  All other users can remove
        only their own profile images.

    **Example Requests**

        POST /api/user/v1/accounts/{username}/image

        DELETE /api/user/v1/accounts/{username}/image

    **Example POST Responses**

        When the requesting user attempts to upload an image for their own
        account, the request returns one of the following responses:

        * If the upload could not be performed, the request returns an HTTP 400
          "Bad Request" response with information about why the request failed.

        * If the upload is successful, the request returns an HTTP 204 "No
          Content" response with no additional content.

        If the requesting user tries to upload an image for a different
        user, the request returns one of the following responses:

        * If no user matches the "username" parameter, the request returns an
          HTTP 404 "Not Found" response.

        * If the user whose profile image is being uploaded exists, but the
          requesting user does not have staff access, the request returns an
          HTTP 404 "Not Found" response.

        * If the specified user exists, and the requesting user has staff
          access, the request returns an HTTP 403 "Forbidden" response.

    **Example DELETE Responses**

        When the requesting user attempts to remove the profile image for
        their own account, the request returns one of the following
        responses:

        * If the image could not be removed, the request returns an HTTP 400
          "Bad Request" response with information about why the request failed.

        * If the request successfully removes the image, the request returns
          an HTTP 204 "No Content" response with no additional content.

        When the requesting user tries to remove the profile image for a
        different user, the view will return one of the following responses:

        * If the requesting user has staff access, and the "username" parameter
          matches a user, the profile image for the specified user is deleted,
          and the request returns an HTTP 204 "No Content" response with no
          additional content.

        * If the requesting user has staff access, but no user is matched by
          the "username" parameter, the request returns an HTTP 404 "Not Found"
          response.

        * If the requesting user does not have staff access, the request
          returns an HTTP 404 "Not Found" response, regardless of whether
          the user exists or not.
    """

    parser_classes = (MultiPartParser, FormParser, TypedFileUploadParser)
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
    permission_classes = (permissions.IsAuthenticated, IsUserInUrl)

    # Every MIME type accepted for upload, across all supported image types.
    upload_media_types = set(itertools.chain(*(image_type.mimetypes for image_type in IMAGE_TYPES.values())))

    def post(self, request, username):
        """
        POST /api/user/v1/accounts/{username}/image
        """

        # validate request: ensure a file was sent in the multipart body
        if 'file' not in request.FILES:
            return Response(
                {
                    "developer_message": u"No file provided for profile image",
                    "user_message": _(u"No file provided for profile image"),
                },
                status=status.HTTP_400_BAD_REQUEST
            )

        # process the upload.
        uploaded_file = request.FILES['file']

        # no matter what happens, delete the temporary file when we're done
        with closing(uploaded_file):

            # image file validation.
            try:
                validate_uploaded_image(uploaded_file)
            except ImageValidationError as error:
                return Response(
                    {"developer_message": error.message, "user_message": error.user_message},
                    status=status.HTTP_400_BAD_REQUEST,
                )

            # generate profile pic and thumbnails and store them
            profile_image_names = get_profile_image_names(username)
            create_profile_images(uploaded_file, profile_image_names)

            # update the user account to reflect that a profile image is available.
            set_has_profile_image(username, True, _make_upload_dt())

            log.info(
                LOG_MESSAGE_CREATE,
                {'image_names': profile_image_names.values(), 'user_id': request.user.id}
            )

        # send client response.
        return Response(status=status.HTTP_204_NO_CONTENT)

    def delete(self, request, username):
        """
        DELETE /api/user/v1/accounts/{username}/image
        """

        try:
            # update the user account to reflect that the images were removed.
            set_has_profile_image(username, False)

            # remove physical files from storage.
            profile_image_names = get_profile_image_names(username)
            remove_profile_images(profile_image_names)

            log.info(
                LOG_MESSAGE_DELETE,
                {'image_names': profile_image_names.values(), 'user_id': request.user.id}
            )
        except UserNotFound:
            return Response(status=status.HTTP_404_NOT_FOUND)

        # send client response.
        return Response(status=status.HTTP_204_NO_CONTENT)
class ProfileImageUploadView(APIView):
    """
    **DEPRECATION WARNING**

    /api/profile_images/v1/{username}/upload is deprecated.
    All requests should now be sent to
    /api/user/v1/accounts/{username}/image
    """

    # Reuse the canonical view's configuration so behavior stays in sync.
    parser_classes = ProfileImageView.parser_classes
    authentication_classes = ProfileImageView.authentication_classes
    permission_classes = ProfileImageView.permission_classes

    def post(self, request, username):
        """
        POST /api/profile_images/v1/{username}/upload
        """
        # Delegate to the canonical accounts image endpoint.
        delegate = ProfileImageView()
        return delegate.post(request, username)
class ProfileImageRemoveView(APIView):
    """
    **DEPRECATION WARNING**

    /api/profile_images/v1/{username}/remove is deprecated.
    This endpoint's POST is replaced by the DELETE method at
    /api/user/v1/accounts/{username}/image.
    """

    # Reuse the canonical view's configuration so behavior stays in sync.
    authentication_classes = ProfileImageView.authentication_classes
    permission_classes = ProfileImageView.permission_classes

    def post(self, request, username):
        """
        POST /api/profile_images/v1/{username}/remove
        """
        # A legacy POST maps onto the canonical DELETE handler.
        delegate = ProfileImageView()
        return delegate.delete(request, username)
| agpl-3.0 |
cgstudiomap/cgstudiomap | main/eggs/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/lib/abag.py | 32 | 1140 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/abag.py
__version__=''' $Id$ '''
__doc__='''Data structure to hold a collection of attributes, used by styles.'''
class ABag:
    """
    'Attribute Bag' - a trivial BAG class for holding attributes.

    This predates modern Python. Doing this again, we'd use a subclass
    of dict.

    You may initialize with keyword arguments.
        a = ABag(k0=v0,....,kx=vx,....)  ==> getattr(a,'kx')==vx
        c = a.clone(ak0=av0,.....)  copy with optional additional attributes.
    """
    def __init__(self, **attr):
        self.__dict__.update(attr)

    def clone(self, **attr):
        """Return a shallow copy, optionally updated with ``attr``."""
        n = self.__class__(**self.__dict__)
        if attr:
            n.__dict__.update(attr)
        return n

    def __repr__(self):
        # sorted() keys give a deterministic representation regardless of
        # insertion order (idiomatic replacement for list(keys())+sort()).
        attrs = self.__dict__
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join('%s=%r' % (k, attrs[k]) for k in sorted(attrs)))
if __name__ == "__main__":
    # Tiny smoke test: show an ABag and an identical clone of it.
    bag = ABag(a=1, c="hello")
    bag_copy = bag.clone()
    print(bag)
    print(bag_copy)
| agpl-3.0 |
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/setuptools/msvc9_support.py | 429 | 2187 | try:
import distutils.msvc9compiler
except ImportError:
pass
unpatched = dict()
def patch_for_specialized_compiler():
    """
    Monkey-patch distutils.msvc9compiler to prefer the standalone
    "Microsoft Visual C++ Compiler for Python" build (Windows only).
    The original functions are saved in ``unpatched`` and used as the
    fallback when the standalone compiler is not installed.
    """
    module_missing = 'distutils' not in globals()
    already_patched = bool(unpatched)
    if module_missing or already_patched:
        # Nothing to do: either the compiler module failed to import,
        # or a previous call already installed the patches.
        return

    unpatched.update(vars(distutils.msvc9compiler))

    distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
    distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
def find_vcvarsall(version):
    """
    Locate vcvarsall.bat for the standalone VC++ for Python installation.

    Checks the registry keys written by the "Microsoft Visual C++ Compiler
    for Python" installer (per-user first, then the all-user Wow6432Node
    location) and falls back to the stock distutils lookup saved in
    ``unpatched`` when nothing usable is found.
    """
    Reg = distutils.msvc9compiler.Reg
    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
    key = VC_BASE % ('', version)
    try:
        # Per-user installs register the compiler path here
        productdir = Reg.get_value(key, "installdir")
    except KeyError:
        try:
            # All-user installs on a 64-bit system register here
            key = VC_BASE % ('Wow6432Node\\', version)
            productdir = Reg.get_value(key, "installdir")
        except KeyError:
            productdir = None

    if productdir:
        import os
        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
        if os.path.isfile(vcvarsall):
            return vcvarsall

    # Standalone compiler not found: defer to the saved original lookup.
    return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
    """
    Call the original query_vcvarsall, rewriting the "vcvarsall.bat not
    found" failure into a friendlier message (for VC 9 it points users at
    Microsoft's standalone compiler download).  Other errors pass through.
    """
    try:
        return unpatched['query_vcvarsall'](version, *args, **kwargs)
    except distutils.errors.DistutilsPlatformError as exc:
        if exc and "vcvarsall.bat" in exc.args[0]:
            message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
            if int(version) == 9:
                # This redirection link is maintained by Microsoft.
                # Contact vspython@microsoft.com if it needs updating.
                raise distutils.errors.DistutilsPlatformError(
                    message + ' Get it from http://aka.ms/vcpython27'
                )
            raise distutils.errors.DistutilsPlatformError(message)
        # Unrelated platform error: re-raise unchanged.
        raise
| mit |
RAtechntukan/Sick-Beard | lib/imdb/parser/__init__.py | 143 | 1030 | """
parser package (imdb package).
This package provides various parsers to access IMDb data (e.g.: a
parser for the web/http interface, a parser for the SQL database
interface, etc.).
So far, the http/httpThin, mobile and sql parsers are implemented.
Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__all__ = ['http', 'mobile', 'sql']
| gpl-3.0 |
soldag/home-assistant | homeassistant/components/dlna_dmr/media_player.py | 4 | 13775 | """Support for DLNA DMR (Device Media Renderer)."""
import asyncio
from datetime import timedelta
import functools
import logging
from typing import Optional
import aiohttp
from async_upnp_client import UpnpFactory
from async_upnp_client.aiohttp import AiohttpNotifyServer, AiohttpSessionRequester
from async_upnp_client.profiles.dlna import DeviceState, DmrDevice
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_IMAGE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TVSHOW,
MEDIA_TYPE_VIDEO,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_NAME,
CONF_URL,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import get_local_ip
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

# Key under hass.data where this platform keeps its shared state
# (lock, notify server, event handler).
DLNA_DMR_DATA = "dlna_dmr"

DEFAULT_NAME = "DLNA Digital Media Renderer"
DEFAULT_LISTEN_PORT = 8301

CONF_LISTEN_IP = "listen_ip"
CONF_LISTEN_PORT = "listen_port"
CONF_CALLBACK_URL_OVERRIDE = "callback_url_override"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_URL): cv.string,
        vol.Optional(CONF_LISTEN_IP): cv.string,
        vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_CALLBACK_URL_OVERRIDE): cv.url,
    }
)

# Map Home Assistant media types to UPnP item classes used when queueing
# media on the renderer.
HOME_ASSISTANT_UPNP_CLASS_MAPPING = {
    MEDIA_TYPE_MUSIC: "object.item.audioItem",
    MEDIA_TYPE_TVSHOW: "object.item.videoItem",
    MEDIA_TYPE_MOVIE: "object.item.videoItem",
    MEDIA_TYPE_VIDEO: "object.item.videoItem",
    MEDIA_TYPE_EPISODE: "object.item.videoItem",
    MEDIA_TYPE_CHANNEL: "object.item.videoItem",
    MEDIA_TYPE_IMAGE: "object.item.imageItem",
    MEDIA_TYPE_PLAYLIST: "object.item.playlistItem",
}
# Fallback UPnP class for media types not listed above.
UPNP_CLASS_DEFAULT = "object.item"
# Map Home Assistant media types to wildcard MIME types sent to the device.
HOME_ASSISTANT_UPNP_MIME_TYPE_MAPPING = {
    MEDIA_TYPE_MUSIC: "audio/*",
    MEDIA_TYPE_TVSHOW: "video/*",
    MEDIA_TYPE_MOVIE: "video/*",
    MEDIA_TYPE_VIDEO: "video/*",
    MEDIA_TYPE_EPISODE: "video/*",
    MEDIA_TYPE_CHANNEL: "video/*",
    MEDIA_TYPE_IMAGE: "image/*",
    MEDIA_TYPE_PLAYLIST: "playlist/*",
}
def catch_request_errors():
    """Decorator factory: log and swallow device-request errors.

    A wrapped coroutine returns ``None`` when the underlying UPnP call
    times out or the HTTP request fails.
    """

    def call_wrapper(func):
        """Wrap ``func`` with timeout/client-error handling."""

        @functools.wraps(func)
        async def wrapper(self, *args, **kwargs):
            try:
                result = await func(self, *args, **kwargs)
            except (asyncio.TimeoutError, aiohttp.ClientError):
                _LOGGER.error("Error during call %s", func.__name__)
                return None
            return result

        return wrapper

    return call_wrapper
async def async_start_event_handler(
    hass: HomeAssistantType,
    server_host: str,
    server_port: int,
    requester,
    callback_url_override: Optional[str] = None,
):
    """Start (or reuse) the shared UPnP event notify server.

    Only one AiohttpNotifyServer is created per Home Assistant instance;
    subsequent calls return the already-running event handler.
    """
    hass_data = hass.data[DLNA_DMR_DATA]
    if "event_handler" in hass_data:
        # A previous platform setup already started the server.
        return hass_data["event_handler"]

    # start event handler
    server = AiohttpNotifyServer(
        requester,
        listen_port=server_port,
        listen_host=server_host,
        callback_url=callback_url_override,
    )
    await server.start_server()
    _LOGGER.info("UPNP/DLNA event handler listening, url: %s", server.callback_url)
    hass_data["notify_server"] = server
    hass_data["event_handler"] = server.event_handler

    # register for graceful shutdown
    async def async_stop_server(event):
        """Stop server."""
        _LOGGER.debug("Stopping UPNP/DLNA event handler")
        await server.stop_server()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_server)

    return hass_data["event_handler"]
async def async_setup_platform(
    hass: HomeAssistantType, config, async_add_entities, discovery_info=None
):
    """Set up DLNA DMR platform."""
    # NOTE(review): if neither CONF_URL nor discovery_info were provided,
    # ``url``/``name`` would be unbound below.  The platform contract
    # appears to guarantee one of the two — confirm before relying on it.
    if config.get(CONF_URL) is not None:
        url = config[CONF_URL]
        name = config.get(CONF_NAME)
    elif discovery_info is not None:
        url = discovery_info["ssdp_description"]
        name = discovery_info.get("name")

    if DLNA_DMR_DATA not in hass.data:
        hass.data[DLNA_DMR_DATA] = {}

    if "lock" not in hass.data[DLNA_DMR_DATA]:
        hass.data[DLNA_DMR_DATA]["lock"] = asyncio.Lock()

    # build upnp/aiohttp requester
    session = async_get_clientsession(hass)
    requester = AiohttpSessionRequester(session, True)

    # ensure event handler has been started (serialized by the shared lock)
    async with hass.data[DLNA_DMR_DATA]["lock"]:
        server_host = config.get(CONF_LISTEN_IP)
        if server_host is None:
            # Fall back to the auto-detected local IP.
            server_host = get_local_ip()
        server_port = config.get(CONF_LISTEN_PORT, DEFAULT_LISTEN_PORT)
        callback_url_override = config.get(CONF_CALLBACK_URL_OVERRIDE)
        event_handler = await async_start_event_handler(
            hass, server_host, server_port, requester, callback_url_override
        )

    # create upnp device
    factory = UpnpFactory(requester, disable_state_variable_validation=True)
    try:
        upnp_device = await factory.async_create_device(url)
    except (asyncio.TimeoutError, aiohttp.ClientError) as err:
        # Device unreachable right now: ask Home Assistant to retry setup.
        raise PlatformNotReady() from err

    # wrap with DmrDevice
    dlna_device = DmrDevice(upnp_device, event_handler)

    # create our own device
    device = DlnaDmrDevice(dlna_device, name)
    _LOGGER.debug("Adding device: %s", device)
    async_add_entities([device], True)
class DlnaDmrDevice(MediaPlayerEntity):
    """Representation of a DLNA DMR device.

    Wraps an async_upnp_client DmrDevice; all state is pushed to Home
    Assistant via UPnP event subscriptions (see ``_on_event``) and a
    polling ``async_update`` that also maintains the subscription.
    """

    def __init__(self, dmr_device, name=None):
        """Initialize DLNA DMR device."""
        self._device = dmr_device
        self._name = name          # optional user-supplied override
        self._available = False    # set by async_update
        self._subscription_renew_time = None

    async def async_added_to_hass(self):
        """Handle addition."""
        self._device.on_event = self._on_event

        # Register unsubscribe on stop
        bus = self.hass.bus
        bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_on_hass_stop)

    @property
    def available(self):
        """Device is available."""
        return self._available

    async def _async_on_hass_stop(self, event):
        """Event handler on Home Assistant stop."""
        async with self.hass.data[DLNA_DMR_DATA]["lock"]:
            await self._device.async_unsubscribe_services()

    async def async_update(self):
        """Retrieve the latest data."""
        was_available = self._available

        try:
            await self._device.async_update()
            self._available = True
        except (asyncio.TimeoutError, aiohttp.ClientError):
            self._available = False
            _LOGGER.debug("Device unavailable")
            return

        # do we need to (re-)subscribe?
        now = dt_util.utcnow()
        should_renew = (
            self._subscription_renew_time and now >= self._subscription_renew_time
        )
        # Renew when the renewal deadline passed, or when the device just
        # transitioned from unavailable back to available.
        if should_renew or not was_available and self._available:
            try:
                timeout = await self._device.async_subscribe_services()
                # Renew at half the granted timeout to stay safely subscribed.
                self._subscription_renew_time = dt_util.utcnow() + timeout / 2
            except (asyncio.TimeoutError, aiohttp.ClientError):
                self._available = False
                _LOGGER.debug("Could not (re)subscribe")

    def _on_event(self, service, state_variables):
        """State variable(s) changed, let home-assistant know."""
        self.schedule_update_ha_state()

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        # Build the feature bitmask from what this device reports.
        supported_features = 0

        if self._device.has_volume_level:
            supported_features |= SUPPORT_VOLUME_SET
        if self._device.has_volume_mute:
            supported_features |= SUPPORT_VOLUME_MUTE
        if self._device.has_play:
            supported_features |= SUPPORT_PLAY
        if self._device.has_pause:
            supported_features |= SUPPORT_PAUSE
        if self._device.has_stop:
            supported_features |= SUPPORT_STOP
        if self._device.has_previous:
            supported_features |= SUPPORT_PREVIOUS_TRACK
        if self._device.has_next:
            supported_features |= SUPPORT_NEXT_TRACK
        if self._device.has_play_media:
            supported_features |= SUPPORT_PLAY_MEDIA
        if self._device.has_seek_rel_time:
            supported_features |= SUPPORT_SEEK

        return supported_features

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._device.volume_level

    @catch_request_errors()
    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        await self._device.async_set_volume_level(volume)

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._device.is_volume_muted

    @catch_request_errors()
    async def async_mute_volume(self, mute):
        """Mute the volume."""
        desired_mute = bool(mute)
        await self._device.async_mute_volume(desired_mute)

    @catch_request_errors()
    async def async_media_pause(self):
        """Send pause command."""
        if not self._device.can_pause:
            _LOGGER.debug("Cannot do Pause")
            return

        await self._device.async_pause()

    @catch_request_errors()
    async def async_media_play(self):
        """Send play command."""
        if not self._device.can_play:
            _LOGGER.debug("Cannot do Play")
            return

        await self._device.async_play()

    @catch_request_errors()
    async def async_media_stop(self):
        """Send stop command."""
        if not self._device.can_stop:
            _LOGGER.debug("Cannot do Stop")
            return

        await self._device.async_stop()

    @catch_request_errors()
    async def async_media_seek(self, position):
        """Send seek command."""
        if not self._device.can_seek_rel_time:
            _LOGGER.debug("Cannot do Seek/rel_time")
            return

        time = timedelta(seconds=position)
        await self._device.async_seek_rel_time(time)

    @catch_request_errors()
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        title = "Home Assistant"
        mime_type = HOME_ASSISTANT_UPNP_MIME_TYPE_MAPPING.get(media_type, media_type)
        upnp_class = HOME_ASSISTANT_UPNP_CLASS_MAPPING.get(
            media_type, UPNP_CLASS_DEFAULT
        )

        # Stop current playing media
        if self._device.can_stop:
            await self.async_media_stop()

        # Queue media
        await self._device.async_set_transport_uri(
            media_id, title, mime_type, upnp_class
        )
        await self._device.async_wait_for_can_play()

        # If already playing, no need to call Play
        if self._device.state == DeviceState.PLAYING:
            return

        # Play it
        await self.async_media_play()

    @catch_request_errors()
    async def async_media_previous_track(self):
        """Send previous track command."""
        if not self._device.can_previous:
            _LOGGER.debug("Cannot do Previous")
            return

        await self._device.async_previous()

    @catch_request_errors()
    async def async_media_next_track(self):
        """Send next track command."""
        if not self._device.can_next:
            _LOGGER.debug("Cannot do Next")
            return

        await self._device.async_next()

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._device.media_title

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._device.media_image_url

    @property
    def state(self):
        """State of the player."""
        # Map the UPnP transport state onto Home Assistant states.
        if not self._available:
            return STATE_OFF

        if self._device.state is None:
            return STATE_ON
        if self._device.state == DeviceState.PLAYING:
            return STATE_PLAYING
        if self._device.state == DeviceState.PAUSED:
            return STATE_PAUSED

        return STATE_IDLE

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._device.media_duration

    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        return self._device.media_position

    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid.

        Returns value from homeassistant.util.dt.utcnow().
        """
        return self._device.media_position_updated_at

    @property
    def name(self) -> str:
        """Return the name of the device."""
        # User-configured name wins over the device-reported one.
        if self._name:
            return self._name
        return self._device.name

    @property
    def unique_id(self) -> str:
        """Return an unique ID."""
        return self._device.udn
| apache-2.0 |
patjak/drm-gma500 | scripts/gdb/linux/tasks.py | 630 | 2892 | #
# gdb helper commands and functions for Linux kernel debugging
#
# task & thread tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
task_type = utils.CachedType("struct task_struct")
def task_lists():
task_ptr_type = task_type.get_type().pointer()
init_task = gdb.parse_and_eval("init_task").address
t = g = init_task
while True:
while True:
yield t
t = utils.container_of(t['thread_group']['next'],
task_ptr_type, "thread_group")
if t == g:
break
t = g = utils.container_of(g['tasks']['next'],
task_ptr_type, "tasks")
if t == init_task:
return
def get_task_by_pid(pid):
for task in task_lists():
if int(task['pid']) == pid:
return task
return None
class LxTaskByPidFunc(gdb.Function):
"""Find Linux task by PID and return the task_struct variable.
$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
return that task_struct variable which PID matches."""
def __init__(self):
super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")
def invoke(self, pid):
task = get_task_by_pid(pid)
if task:
return task.dereference()
else:
raise gdb.GdbError("No task of PID " + str(pid))
LxTaskByPidFunc()
class LxPs(gdb.Command):
"""Dump Linux tasks."""
def __init__(self):
super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
for task in task_lists():
gdb.write("{address} {pid} {comm}\n".format(
address=task,
pid=task["pid"],
comm=task["comm"].string()))
LxPs()
thread_info_type = utils.CachedType("struct thread_info")
ia64_task_size = None
def get_thread_info(task):
thread_info_ptr_type = thread_info_type.get_type().pointer()
if utils.is_target_arch("ia64"):
global ia64_task_size
if ia64_task_size is None:
ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
thread_info_addr = task.address + ia64_task_size
thread_info = thread_info_addr.cast(thread_info_ptr_type)
else:
thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
class LxThreadInfoFunc (gdb.Function):
"""Calculate Linux thread_info from task variable.
$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
variable."""
def __init__(self):
super(LxThreadInfoFunc, self).__init__("lx_thread_info")
def invoke(self, task):
return get_thread_info(task)
LxThreadInfoFunc()
| gpl-2.0 |
minhphung171093/GreenERP_V8 | openerp/addons/mrp/mrp.py | 145 | 73675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import openerp.addons.decimal_precision as dp
from collections import OrderedDict
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
from openerp import tools, SUPERUSER_ID
from openerp.addons.product import _common
class mrp_property_group(osv.osv):
    """
    A named group used to classify mrp properties.
    """
    _name = 'mrp.property.group'
    _description = 'Property Group'
    _columns = {
        'name': fields.char('Property Group', required=True),
        'description': fields.text('Description'),
    }
class mrp_property(osv.osv):
    """
    A manufacturing property, attached to a property group.
    """
    _name = 'mrp.property'
    _description = 'Property'
    _columns = {
        'name': fields.char('Name', required=True),
        'composition': fields.selection([('min','min'),('max','max'),('plus','plus')], 'Properties composition', required=True, help="Not used in computations, for information purpose only."),
        'group_id': fields.many2one('mrp.property.group', 'Property Group', required=True),
        'description': fields.text('Description'),
    }
    _defaults = {
        # Informational only (see the 'composition' help text above).
        'composition': lambda *a: 'min',
    }
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class mrp_workcenter(osv.osv):
    """
    A work center: a production resource (machine or team) with capacity,
    timing and analytic-costing data, consumed by routing explosion
    (mrp.bom._bom_explode) to build work order lines.
    """
    _name = 'mrp.workcenter'
    _description = 'Work Center'
    # Delegation inheritance: resource fields (name, calendar, time_efficiency, ...)
    # live on the linked resource.resource record.
    _inherits = {'resource.resource': "resource_id"}
    _columns = {
        'note': fields.text('Description', help="Description of the Work Center. Explain here what's a cycle according to this Work Center."),
        'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. If this Work Center represents a team of 5 workers, the capacity per cycle is 5."),
        'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."),
        'time_start': fields.float('Time before prod.', help="Time in hours for the setup."),
        'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."),
        'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."),
        'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account', domain=[('type','!=','view')],
            help="Fill this only if you want automatic analytic accounting entries on production orders."),
        'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."),
        'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account', domain=[('type','!=','view')],
            help="Fill this only if you want automatic analytic accounting entries on production orders."),
        'costs_journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
        'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('type','!=','view')]),
        'resource_id': fields.many2one('resource.resource', 'Resource', ondelete='cascade', required=True),
        'product_id': fields.many2one('product.product', 'Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."),
    }
    _defaults = {
        'capacity_per_cycle': 1.0,
        'resource_type': 'material',  # inherited resource.resource field
    }

    def on_change_product_cost(self, cr, uid, ids, product_id, context=None):
        """Onchange: default costs_hour from the selected product's standard price."""
        value = {}
        if product_id:
            cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            value = {'costs_hour': cost.standard_price}
        return {'value': value}

    def _check_capacity_per_cycle(self, cr, uid, ids, context=None):
        """Constraint helper: capacity_per_cycle must be strictly positive
        (it is used as a divisor in mrp.bom._bom_explode)."""
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.capacity_per_cycle <= 0.0:
                return False
        return True

    _constraints = [
        (_check_capacity_per_cycle, 'The capacity per cycle must be strictly positive.', ['capacity_per_cycle']),
    ]
class mrp_routing(osv.osv):
    """
    For specifying the routings of Work Centers.

    A routing is an ordered list of work center operations
    (mrp.routing.workcenter lines) attached to a BoM or production order.
    """
    _name = 'mrp.routing'
    _description = 'Routing'
    _columns = {
        'name': fields.char('Name', required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."),
        'code': fields.char('Code', size=8),
        'note': fields.text('Description'),
        # copy=True: duplicating a routing duplicates its operation lines too.
        'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers', copy=True),
        'location_id': fields.many2one('stock.location', 'Production Location',
            help="Keep empty if you produce at the location where the finished products are needed." \
                "Set a location if you produce at a fixed location. This can be a partner location " \
                "if you subcontract the manufacturing operations."
        ),
        'company_id': fields.many2one('res.company', 'Company'),
    }
    _defaults = {
        'active': lambda *a: 1,
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context)
    }
class mrp_routing_workcenter(osv.osv):
    """
    Defines working cycles and hours of a Work Center using routings.

    One line = one operation of a routing: which work center, how many
    cycles and hours. Consumed by mrp.bom._bom_explode to generate the
    work order lines of a production order.
    """
    _name = 'mrp.routing.workcenter'
    _description = 'Work Center Usage'
    _order = 'sequence'
    _columns = {
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
        'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."),
        'cycle_nbr': fields.float('Number of Cycles', required=True,
            help="Number of iterations this work center has to do in the specified operation of the routing."),
        'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."),
        'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade',
            help="Routing indicates all the Work Centers used, for how long and/or cycles." \
                "If Routing is indicated then,the third tab of a production order (Work Centers) will be automatically pre-completed."),
        'note': fields.text('Description'),
        # Stored related field so operations can be filtered by company.
        'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    _defaults = {
        'cycle_nbr': lambda *a: 1.0,
        'hour_nbr': lambda *a: 0.0,
    }
class mrp_bom(osv.osv):
    """
    Defines bills of material for a product.

    A BoM may target a whole product template or one specific variant, can be
    restricted by validity dates and mrp.property tags, and is exploded
    recursively by ``_bom_explode`` into consume lines and work center lines
    for manufacturing orders.
    """
    _name = 'mrp.bom'
    _description = 'Bill of Material'
    _inherit = ['mail.thread']
    _columns = {
        'name': fields.char('Name'),
        'code': fields.char('Reference', size=16),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."),
        # 'phantom' BoMs are transparently replaced by their components when exploded.
        'type': fields.selection([('normal', 'Normal'), ('phantom', 'Set')], 'BoM Type', required=True,
            help="Set: When processing a sales order for this product, the delivery order will contain the raw materials, instead of the finished product."),
        'position': fields.char('Internal Reference', help="Reference to a position in an external plan."),
        'product_tmpl_id': fields.many2one('product.template', 'Product', domain="[('type', '!=', 'service')]", required=True),
        # Optional: restricts the BoM to one variant (see the _bom_find domain).
        'product_id': fields.many2one('product.product', 'Product Variant',
            domain="['&', ('product_tmpl_id','=',product_tmpl_id), ('type','!=', 'service')]",
            help="If a product variant is defined the BOM is available only for this product."),
        'bom_line_ids': fields.one2many('mrp.bom.line', 'bom_id', 'BoM Lines', copy=True),
        'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
        'date_start': fields.date('Valid From', help="Validity of this BoM. Keep empty if it's always valid."),
        'date_stop': fields.date('Valid Until', help="Validity of this BoM. Keep empty if it's always valid."),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."),
        'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. "\
            "The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
        'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
        'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% during the production process."),
        'property_ids': fields.many2many('mrp.property', string='Properties'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }

    def _get_uom_id(self, cr, uid, *args):
        """Default UoM: the unit of measure with the smallest id."""
        return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]

    _defaults = {
        'active': lambda *a: 1,
        'product_qty': lambda *a: 1.0,
        'product_efficiency': lambda *a: 1.0,
        'product_rounding': lambda *a: 0.0,
        'type': lambda *a: 'normal',
        'product_uom': _get_uom_id,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c),
    }
    _order = "sequence"

    def _bom_find(self, cr, uid, product_tmpl_id=None, product_id=None, properties=None, context=None):
        """ Finds BoM for particular product (or product template).
        @param product_tmpl_id: Selected product template id.
        @param product_id: Selected product variant id; takes precedence and also
                           matches template-level BoMs of that variant's template.
        @param properties: List of mrp.property ids the BoM should satisfy.
        @return: False or BoM id.
        """
        if not context:
            context = {}
        if properties is None:
            properties = []
        if product_id:
            if not product_tmpl_id:
                product_tmpl_id = self.pool['product.product'].browse(cr, uid, product_id, context=context).product_tmpl_id.id
            # Match either a variant-specific BoM or a template-wide one.
            domain = [
                '|',
                ('product_id', '=', product_id),
                '&',
                ('product_id', '=', False),
                ('product_tmpl_id', '=', product_tmpl_id)
            ]
        elif product_tmpl_id:
            domain = [('product_id', '=', False), ('product_tmpl_id', '=', product_tmpl_id)]
        else:
            # neither product nor template, makes no sense to search
            return False
        if context.get('company_id'):
            domain = domain + [('company_id', '=', context['company_id'])]
        # Only BoMs valid today (open-ended dates count as valid).
        domain = domain + [ '|', ('date_start', '=', False), ('date_start', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT)),
                            '|', ('date_stop', '=', False), ('date_stop', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT))]
        # order to prioritize bom with product_id over the one without
        ids = self.search(cr, uid, domain, order='sequence, product_id', context=context)
        # Search a BoM which has all properties specified, or if you can not find one, you could
        # pass a BoM without any properties with the smallest sequence
        bom_empty_prop = False
        for bom in self.pool.get('mrp.bom').browse(cr, uid, ids, context=context):
            # Keep only BoMs whose properties are a subset of the requested ones.
            if not set(map(int, bom.property_ids or [])) - set(properties or []):
                if not properties or bom.property_ids:
                    # Exact property match (or no property filter at all): take it.
                    return bom.id
                elif not bom_empty_prop:
                    # Remember the first property-less BoM as a fallback.
                    bom_empty_prop = bom.id
        return bom_empty_prop

    def _skip_bom_line(self, cr, uid, line, product, context=None):
        """ Control if a BoM line should be produce, can be inherited for add
        custom control.
        @param line: BoM line.
        @param product: Selected product produced.
        @return: True or False
        """
        # Skip lines that are outside their validity-date window today.
        if line.date_start and line.date_start > time.strftime(DEFAULT_SERVER_DATE_FORMAT) or \
            line.date_stop and line.date_stop < time.strftime(DEFAULT_SERVER_DATE_FORMAT):
                return True
        # all bom_line_id variant values must be in the product
        if line.attribute_value_ids:
            if not product or (set(map(int,line.attribute_value_ids or [])) - set(map(int,product.attribute_value_ids))):
                return True
        return False

    def _bom_explode(self, cr, uid, bom, product, factor, properties=None, level=0, routing_id=False, previous_products=None, master_bom=None, context=None):
        """ Finds Products and Work Centers for related BoM for manufacturing order.
        @param bom: BoM of particular product template.
        @param product: Select a particular variant of the BoM. If False use BoM without variants.
        @param factor: Factor represents the quantity, but in UoM of the BoM, taking into account the numbers produced by the BoM
        @param properties: A List of properties Ids.
        @param level: Depth level to find BoM lines starts from 10.
        @param previous_products: List of product previously use by bom explore to avoid recursion
        @param master_bom: When recursion, used to display the name of the master bom
        @return: result: List of dictionaries containing product details.
                 result2: List of dictionaries containing Work Center details.
        """
        uom_obj = self.pool.get("product.uom")
        routing_obj = self.pool.get('mrp.routing')
        master_bom = master_bom or bom

        def _factor(factor, product_efficiency, product_rounding):
            # Inflate the quantity by the efficiency loss, then round up to the
            # rounding step (never below one step).
            factor = factor / (product_efficiency or 1.0)
            factor = _common.ceiling(factor, product_rounding)
            if factor < product_rounding:
                factor = product_rounding
            return factor

        factor = _factor(factor, bom.product_efficiency, bom.product_rounding)

        result = []
        result2 = []

        # Explicit routing_id overrides the BoM's own routing.
        routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False
        if routing:
            for wc_use in routing.workcenter_lines:
                wc = wc_use.workcenter_id
                # Number of full capacity batches needed (partial batch counts as one).
                d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle)
                mult = (d + (m and 1.0 or 0.0))
                cycle = mult * wc_use.cycle_nbr
                result2.append({
                    'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_tmpl_id.name_get()[0][1]),
                    'workcenter_id': wc.id,
                    'sequence': level + (wc_use.sequence or 0),
                    'cycle': cycle,
                    # NOTE(review): hours are *multiplied* by time_efficiency here;
                    # time_efficiency presumably comes from the inherited resource.resource — confirm.
                    'hour': float(wc_use.hour_nbr * mult + ((wc.time_start or 0.0) + (wc.time_stop or 0.0) + cycle * (wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)),
                })

        for bom_line_id in bom.bom_line_ids:
            if self._skip_bom_line(cr, uid, bom_line_id, product, context=context):
                continue
            # Skip lines whose properties are not all among the requested ones.
            if set(map(int, bom_line_id.property_ids or [])) - set(properties or []):
                continue

            if previous_products and bom_line_id.product_id.product_tmpl_id.id in previous_products:
                raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a BoM line with a product recursion: "%s".') % (master_bom.name,bom_line_id.product_id.name_get()[0][1]))

            quantity = _factor(bom_line_id.product_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding)
            bom_id = self._bom_find(cr, uid, product_id=bom_line_id.product_id.id, properties=properties, context=context)

            # If the BoM should not behave like a phantom, just add the product;
            # otherwise recurse into the component's own BoM.
            if bom_line_id.type != "phantom" and (not bom_id or self.browse(cr, uid, bom_id, context=context).type != "phantom"):
                result.append({
                    'name': bom_line_id.product_id.name,
                    'product_id': bom_line_id.product_id.id,
                    'product_qty': quantity,
                    'product_uom': bom_line_id.product_uom.id,
                    'product_uos_qty': bom_line_id.product_uos and _factor(bom_line_id.product_uos_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding) or False,
                    'product_uos': bom_line_id.product_uos and bom_line_id.product_uos.id or False,
                })
            elif bom_id:
                all_prod = [bom.product_tmpl_id.id] + (previous_products or [])
                bom2 = self.browse(cr, uid, bom_id, context=context)
                # We need to convert to units/UoM of chosen BoM
                factor2 = uom_obj._compute_qty(cr, uid, bom_line_id.product_uom.id, quantity, bom2.product_uom.id)
                quantity2 = factor2 / bom2.product_qty
                res = self._bom_explode(cr, uid, bom2, bom_line_id.product_id, quantity2,
                    properties=properties, level=level + 10, previous_products=all_prod, master_bom=master_bom, context=context)
                result = result + res[0]
                result2 = result2 + res[1]
            else:
                raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a phantom BoM line but the product "%s" does not have any BoM defined.') % (master_bom.name,bom_line_id.product_id.name_get()[0][1]))

        return result, result2

    def copy_data(self, cr, uid, id, default=None, context=None):
        """Duplicate helper: suffix the copied BoM's name with '(copy)'."""
        if default is None:
            default = {}
        bom_data = self.read(cr, uid, id, [], context=context)
        default.update(name=_("%s (copy)") % (bom_data['name']))
        return super(mrp_bom, self).copy_data(cr, uid, id, default, context=context)

    def onchange_uom(self, cr, uid, ids, product_tmpl_id, product_uom, context=None):
        """Onchange: warn and reset the UoM when it belongs to a different
        category than the product's own UoM."""
        res = {'value': {}}
        if not product_uom or not product_tmpl_id:
            return res
        product = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res

    def unlink(self, cr, uid, ids, context=None):
        """Refuse deletion while a non-finished manufacturing order uses the BoM."""
        if self.pool['mrp.production'].search(cr, uid, [('bom_id', 'in', ids), ('state', 'not in', ['done', 'cancel'])], context=context):
            raise osv.except_osv(_('Warning!'), _('You can not delete a Bill of Material with running manufacturing orders.\nPlease close or cancel it first.'))
        return super(mrp_bom, self).unlink(cr, uid, ids, context=context)

    def onchange_product_tmpl_id(self, cr, uid, ids, product_tmpl_id, product_qty=0, context=None):
        """ Changes UoM and name if product template changes.
        @param product_tmpl_id: Changed product template id.
        @return: Dictionary of changed values
        """
        res = {}
        if product_tmpl_id:
            prod = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
            res['value'] = {
                'name': prod.name,
                'product_uom': prod.uom_id.id,
            }
        return res
class mrp_bom_line(osv.osv):
    """One component line of a bill of material (mrp.bom)."""
    _name = 'mrp.bom.line'
    _order = "sequence"
    # Lines are displayed under the name of their product.
    _rec_name = "product_id"

    def _get_child_bom_lines(self, cr, uid, ids, field_name, arg, context=None):
        """If the BOM line refers to a BOM, return the ids of the child BOM lines"""
        bom_obj = self.pool['mrp.bom']
        res = {}
        for bom_line in self.browse(cr, uid, ids, context=context):
            bom_id = bom_obj._bom_find(cr, uid,
                product_tmpl_id=bom_line.product_id.product_tmpl_id.id,
                product_id=bom_line.product_id.id, context=context)
            if bom_id:
                child_bom = bom_obj.browse(cr, uid, bom_id, context=context)
                res[bom_line.id] = [x.id for x in child_bom.bom_line_ids]
            else:
                res[bom_line.id] = False
        return res

    _columns = {
        # 'phantom' lines are replaced by the raw materials of the component's own BoM
        # during explosion (see mrp.bom._bom_explode).
        'type': fields.selection([('normal', 'Normal'), ('phantom', 'Phantom')], 'BoM Line Type', required=True,
            help="Phantom: this product line will not appear in the raw materials of manufacturing orders,"
                "it will be directly replaced by the raw materials of its own BoM, without triggering"
                "an extra manufacturing order."),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_uos_qty': fields.float('Product UOS Qty'),
        'product_uos': fields.many2one('product.uom', 'Product UOS', help="Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock."),
        'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True,
            help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
        'date_start': fields.date('Valid From', help="Validity of component. Keep empty if it's always valid."),
        'date_stop': fields.date('Valid Until', help="Validity of component. Keep empty if it's always valid."),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying."),
        'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
        'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
        'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
        'property_ids': fields.many2many('mrp.property', string='Properties'),  # Not used
        'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True, required=True),
        # Variant filter: the line only applies when the produced variant carries
        # all of these attribute values (see mrp.bom._skip_bom_line).
        'attribute_value_ids': fields.many2many('product.attribute.value', string='Variants', help="BOM Product Variants needed form apply this line."),
        'child_line_ids': fields.function(_get_child_bom_lines, relation="mrp.bom.line", string="BOM lines of the referred bom", type="one2many")
    }

    def _get_uom_id(self, cr, uid, *args):
        """Default UoM: the unit of measure with the smallest id."""
        return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]

    _defaults = {
        'product_qty': lambda *a: 1.0,
        'product_efficiency': lambda *a: 1.0,
        'product_rounding': lambda *a: 0.0,
        'type': lambda *a: 'normal',
        'product_uom': _get_uom_id,
        'sequence': 1,
    }
    _sql_constraints = [
        ('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
            'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
    ]

    def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
        """Onchange: warn and reset the UoM when it belongs to a different
        category than the product's own UoM."""
        res = {'value': {}}
        if not product_uom or not product_id:
            return res
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res

    def onchange_product_id(self, cr, uid, ids, product_id, product_qty=0, context=None):
        """ Changes UoM if product_id changes.
        @param product_id: Changed product_id
        @return: Dictionary of changed values
        """
        res = {}
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            res['value'] = {
                'product_uom': prod.uom_id.id,
                'product_uos_qty': 0,
                'product_uos': False
            }
            # When the product has a Unit of Sale, derive the UoS quantity from it.
            if prod.uos_id.id:
                res['value']['product_uos_qty'] = product_qty * prod.uos_coeff
                res['value']['product_uos'] = prod.uos_id.id
        return res
class mrp_production(osv.osv):
"""
Production Orders / Manufacturing Orders
"""
_name = 'mrp.production'
_description = 'Manufacturing Order'
_date_name = 'date_planned'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates total hours and total no. of cycles for a production order.
@param prop: Name of field.
@param unknow_none:
@return: Dictionary of values.
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = {
'hour_total': 0.0,
'cycle_total': 0.0,
}
for wc in prod.workcenter_lines:
result[prod.id]['hour_total'] += wc.hour
result[prod.id]['cycle_total'] += wc.cycle
return result
def _get_workcenter_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool['mrp.production.workcenter.line'].browse(cr, uid, ids, context=context):
result[line.production_id.id] = True
return result.keys()
def _src_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _dest_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _get_progress(self, cr, uid, ids, name, arg, context=None):
""" Return product quantity percentage """
result = dict.fromkeys(ids, 100)
for mrp_production in self.browse(cr, uid, ids, context=context):
if mrp_production.product_qty:
done = 0.0
for move in mrp_production.move_created_ids2:
if not move.scrapped and move.product_id == mrp_production.product_id:
done += move.product_qty
result[mrp_production.id] = done / mrp_production.product_qty * 100
return result
def _moves_assigned(self, cr, uid, ids, name, arg, context=None):
""" Test whether all the consume lines are assigned """
res = {}
for production in self.browse(cr, uid, ids, context=context):
res[production.id] = True
states = [x.state != 'assigned' for x in production.move_lines if x]
if any(states) or len(states) == 0: #When no moves, ready_production will be False, but test_ready will pass
res[production.id] = False
return res
def _mrp_from_move(self, cr, uid, ids, context=None):
""" Return mrp"""
res = []
for move in self.browse(cr, uid, ids, context=context):
res += self.pool.get("mrp.production").search(cr, uid, [('move_lines', 'in', move.id)], context=context)
return res
_columns = {
'name': fields.char('Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'origin': fields.char('Source Document', readonly=True, states={'draft': [('readonly', False)]},
help="Reference of the document that generated this production order request.", copy=False),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]},
domain=[('type','!=','service')]),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Product UoS Quantity', readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS', readonly=True, states={'draft': [('readonly', False)]}),
'progress': fields.function(_get_progress, type='float',
string='Production progress'),
'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will look for components."),
'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will stock the finished products."),
'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'date_start': fields.datetime('Start Date', select=True, readonly=True, copy=False),
'date_finished': fields.datetime('End Date', select=True, readonly=True, copy=False),
'bom_id': fields.many2one('mrp.bom', 'Bill of Material', readonly=True, states={'draft': [('readonly', False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
'routing_id': fields.many2one('mrp.routing', string='Routing', on_delete='set null', readonly=True, states={'draft': [('readonly', False)]},
help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production plannification."),
'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True, copy=False),
'move_lines': fields.one2many('stock.move', 'raw_material_production_id', 'Products to Consume',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True, states={'draft': [('readonly', False)]}),
'move_lines2': fields.one2many('stock.move', 'raw_material_production_id', 'Consumed Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True),
'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods',
readonly=True),
'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('confirmed', 'Awaiting Raw Materials'),
('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
string='Status', readonly=True,
track_visibility='onchange', copy=False,
help="When the production order is created the status is set to 'Draft'.\n\
If the order is confirmed the status is set to 'Waiting Goods'.\n\
If any exceptions are there, the status is set to 'Picking Exception'.\n\
If the stock is available then the status is set to 'Ready to Produce'.\n\
When the production gets started then the status is set to 'In Production'.\n\
When the production is over, the status is set to 'Done'."),
'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['workcenter_lines'], 40),
'mrp.production.workcenter.line': (_get_workcenter_line, ['hour', 'cycle'], 40),
}),
'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['workcenter_lines'], 40),
'mrp.production.workcenter.line': (_get_workcenter_line, ['hour', 'cycle'], 40),
}),
'user_id': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'ready_production': fields.function(_moves_assigned, type='boolean', store={'stock.move': (_mrp_from_move, ['state'], 10)}),
}
_defaults = {
'priority': lambda *a: '1',
'state': lambda *a: 'draft',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'product_qty': lambda *a: 1.0,
'user_id': lambda self, cr, uid, c: uid,
'name': lambda self, cr, uid, context: self.pool['ir.sequence'].get(cr, uid, 'mrp.production', context=context) or '/',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
'location_src_id': _src_id_default,
'location_dest_id': _dest_id_default
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
_order = 'priority desc, date_planned asc'
def _check_qty(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
if order.product_qty <= 0:
return False
return True
_constraints = [
(_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
]
def unlink(self, cr, uid, ids, context=None):
for production in self.browse(cr, uid, ids, context=context):
if production.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a manufacturing order in state \'%s\'.') % production.state)
return super(mrp_production, self).unlink(cr, uid, ids, context=context)
def location_id_change(self, cr, uid, ids, src, dest, context=None):
""" Changes destination location if source location is changed.
@param src: Source location id.
@param dest: Destination location id.
@return: Dictionary of values.
"""
if dest:
return {}
if src:
return {'value': {'location_dest_id': src}}
return {}
def product_id_change(self, cr, uid, ids, product_id, product_qty=0, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom': False,
'bom_id': False,
'routing_id': False,
'product_uos_qty': 0,
'product_uos': False
}}
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
bom_id = bom_obj._bom_find(cr, uid, product_id=product.id, properties=[], context=context)
routing_id = False
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uos_qty': 0, 'product_uos': False, 'product_uom': product_uom_id, 'bom_id': bom_id, 'routing_id': routing_id}
if product.uos_id.id:
result['value']['product_uos_qty'] = product_qty * product.uos_coeff
result['value']['product_uos'] = product.uos_id.id
return result
def bom_id_change(self, cr, uid, ids, bom_id, context=None):
""" Finds routing for changed BoM.
@param product: Id of product.
@return: Dictionary of values.
"""
if not bom_id:
return {'value': {
'routing_id': False
}}
bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
result = {
'routing_id': routing_id
}
return {'value': result}
def _prepare_lines(self, cr, uid, production, properties=None, context=None):
    """Explode the production's BoM into product and work center lines.

    If the order has no BoM yet, look one up for its product, store it
    (and its routing) on the order, and fail explicitly when none can be
    found.
    @param production: mrp.production browse record
    @param properties: property ids used to select the BoM
    @return: (product_lines, workcenter_lines) as returned by _bom_explode
    @raise osv.except_osv: when no bill of material applies to the product
    """
    bom_obj = self.pool.get('mrp.bom')
    uom_obj = self.pool.get('product.uom')
    bom_point = production.bom_id
    bom_id = production.bom_id.id
    if not bom_point:
        bom_id = bom_obj._bom_find(cr, uid, product_id=production.product_id.id, properties=properties, context=context)
        if bom_id:
            # Fix: propagate the context (the original browse dropped it,
            # unlike every other browse call in this model).
            bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
            routing_id = bom_point.routing_id.id or False
            self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
    if not bom_id:
        raise osv.except_osv(_('Error!'), _("Cannot find a bill of material for this product."))
    # Scale the BoM by the ordered quantity, converted into the BoM's UoM.
    factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
    # product_lines, workcenter_lines
    return bom_obj._bom_explode(cr, uid, bom_point, production.product_id, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id, context=context)
def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
    """Recompute product_lines and workcenter_lines from the BoM.

    Existing scheduled product and work center lines are removed first
    (as superuser, since an MRP user may lack rights on those models),
    then recreated from the exploded BoM.
    @param properties: property ids used to select the BoM
    @return: the product_lines of the last processed production order
    """
    if properties is None:
        properties = []
    results = []
    prod_line_obj = self.pool.get('mrp.production.product.line')
    workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
    for production in self.browse(cr, uid, ids, context=context):
        # Drop the previously computed lines before recreating them.
        prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)
        workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)
        # results: product_lines, results2: workcenter_lines
        results, results2 = self._prepare_lines(cr, uid, production, properties=properties, context=context)
        # reset product_lines in production order
        for line in results:
            line['production_id'] = production.id
            # Fix: pass the context (the original create() dropped it,
            # inconsistent with the workcenter line create below).
            prod_line_obj.create(cr, uid, line, context=context)
        # reset workcenter_lines in production order
        for line in results2:
            line['production_id'] = production.id
            workcenter_line_obj.create(cr, uid, line, context=context)
    return results
def action_compute(self, cr, uid, ids, properties=None, context=None):
    """Recompute the scheduled products of the given orders.

    @param properties: list of property ids used to select the BoM
    @return: number of scheduled product lines created
    """
    product_lines = self._action_compute_lines(cr, uid, ids, properties=properties, context=context)
    return len(product_lines)
def action_cancel(self, cr, uid, ids, context=None):
    """Cancel the production orders and their related stock moves.

    Finished-product moves and raw-material moves are cancelled, the
    procurements chained to the raw materials are cancelled too, and the
    procurements that generated the orders are put in exception.
    @return: True
    """
    if context is None:
        context = {}
    move_obj = self.pool.get('stock.move')
    proc_obj = self.pool.get('procurement.order')
    for production in self.browse(cr, uid, ids, context=context):
        if production.move_created_ids:
            move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids])
        # Cancel the procurements chained to the raw material moves.
        procs = proc_obj.search(cr, uid, [('move_dest_id', 'in', [x.id for x in production.move_lines])], context=context)
        if procs:
            proc_obj.cancel(cr, uid, procs, context=context)
        move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines])
    self.write(cr, uid, ids, {'state': 'cancel'})
    # Put related procurements in exception.
    # Fix: reuse proc_obj instead of fetching the model from the pool a
    # second time.
    procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
    if procs:
        proc_obj.message_post(cr, uid, procs, body=_('Manufacturing order cancelled.'), context=context)
        proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
    return True
def action_ready(self, cr, uid, ids, context=None):
    """Mark the orders as Ready and realign the chained product move.

    Creates the finished-product move when it is missing, and re-points
    the chained move's source to the order's destination location when
    the two differ.
    @return: True
    """
    move_obj = self.pool.get('stock.move')
    self.write(cr, uid, ids, {'state': 'ready'})
    for production in self.browse(cr, uid, ids, context=context):
        if not production.move_created_ids:
            self._make_production_produce_line(cr, uid, production, context=context)
        prod_move = production.move_prod_id
        if prod_move and prod_move.location_id.id != production.location_dest_id.id:
            move_obj.write(cr, uid, [prod_move.id],
                           {'location_id': production.location_dest_id.id})
    return True
def action_production_end(self, cr, uid, ids, context=None):
    """Mark the orders as Done: book work center costs, stamp the
    finished date, then re-check the related procurements.
    @return: result of the state/date write
    """
    for production in self.browse(cr, uid, ids):
        self._costs_generate(cr, uid, production)
    done_vals = {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')}
    write_res = self.write(cr, uid, ids, done_vals)
    # Check related procurements
    proc_obj = self.pool.get("procurement.order")
    proc_ids = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
    proc_obj.check(cr, uid, proc_ids, context=context)
    return write_res
def test_production_done(self, cr, uid, ids):
    """Workflow test: are the orders fully produced?

    An order counts as done only when it has neither pending raw
    material moves nor pending finished-product moves.
    @return: True or False
    """
    res = True
    for production in self.browse(cr, uid, ids):
        if production.move_lines or production.move_created_ids:
            res = False
    return res
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
""" Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but if the
module mrp_subproduct is installed, then we must use the move_id to identify the product to produce
and its quantity.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct.
:return: The factor to apply to the quantity that we should produce for the given production order.
"""
return 1
def _get_produced_qty(self, cr, uid, production, context=None):
''' returns the produced quantity of product 'production.product_id' for the given production, in the product UoM
'''
produced_qty = 0
for produced_product in production.move_created_ids2:
if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id):
continue
produced_qty += produced_product.product_qty
return produced_qty
def _get_consumed_data(self, cr, uid, production, context=None):
''' returns a dictionary containing for each raw material of the given production, its quantity already consumed (in the raw material UoM)
'''
consumed_data = {}
# Calculate already consumed qtys
for consumed in production.move_lines2:
if consumed.scrapped:
continue
if not consumed_data.get(consumed.product_id.id, False):
consumed_data[consumed.product_id.id] = 0
consumed_data[consumed.product_id.id] += consumed.product_qty
return consumed_data
def _calculate_qty(self, cr, uid, production, product_qty=0.0, context=None):
    """
    Calculates the raw material quantities still needed to produce an
    extra number of finished products, allocated per lot.

    product_qty is in the UoM of the product; when 0, the remaining
    quantity to produce for the order is used instead.
    @return: list of dicts {'product_id', 'product_qty', 'lot_id'} —
        one entry per (raw material, lot) pair; lot_id is False for the
        part of the quantity that could not be matched to any quant.
    """
    quant_obj = self.pool.get("stock.quant")
    uom_obj = self.pool.get("product.uom")
    produced_qty = self._get_produced_qty(cr, uid, production, context=context)
    consumed_data = self._get_consumed_data(cr, uid, production, context=context)
    # In case no product_qty is given, take the remaining qty to produce
    # for the given production.
    if not product_qty:
        product_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id) - produced_qty
    # Total ordered quantity, in the product's UoM.
    production_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id)
    # Scheduled (BoM) quantity per raw material, in the raw material's UoM.
    # OrderedDict keeps the consume lines in BoM order.
    scheduled_qty = OrderedDict()
    for scheduled in production.product_lines:
        if scheduled.product_id.type == 'service':
            continue
        qty = uom_obj._compute_qty(cr, uid, scheduled.product_uom.id, scheduled.product_qty, scheduled.product_id.uom_id.id)
        if scheduled_qty.get(scheduled.product_id.id):
            scheduled_qty[scheduled.product_id.id] += qty
        else:
            scheduled_qty[scheduled.product_id.id] = qty
    # dicts: product_id -> {lot_id (or False): qty to consume}
    dicts = OrderedDict()
    # Find product qty to be consumed and consume it
    for product_id in scheduled_qty.keys():
        consumed_qty = consumed_data.get(product_id, 0.0)
        # qty available for consume and produce
        sched_product_qty = scheduled_qty[product_id]
        qty_avail = sched_product_qty - consumed_qty
        if qty_avail <= 0.0:
            # there will be nothing to consume for this raw material
            continue
        if not dicts.get(product_id):
            dicts[product_id] = {}
        # total qty of consumed product we need after this consumption:
        # scale the scheduled qty proportionally unless we are producing
        # beyond the ordered quantity, in which case cap at the schedule.
        if product_qty + produced_qty <= production_qty:
            total_consume = ((product_qty + produced_qty) * sched_product_qty / production_qty)
        else:
            total_consume = sched_product_qty
        qty = total_consume - consumed_qty
        # Search for quants related to this related move to allocate the
        # quantity per lot.
        for move in production.move_lines:
            if qty <= 0.0:
                break
            if move.product_id.id != product_id:
                continue
            q = min(move.product_qty, qty)
            # Prefer quants already reserved for this move.
            quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, q, domain=[('qty', '>', 0.0)],
                                                          prefered_domain_list=[[('reservation_id', '=', move.id)]], context=context)
            for quant, quant_qty in quants:
                if quant:
                    lot_id = quant.lot_id.id
                    if not product_id in dicts.keys():
                        dicts[product_id] = {lot_id: quant_qty}
                    elif lot_id in dicts[product_id].keys():
                        dicts[product_id][lot_id] += quant_qty
                    else:
                        dicts[product_id][lot_id] = quant_qty
                    qty -= quant_qty
        # Whatever could not be matched to a quant is consumed without lot
        # (lot_id False), guarded by the UoM rounding precision.
        if float_compare(qty, 0, self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')) == 1:
            if dicts[product_id].get(False):
                dicts[product_id][False] += qty
            else:
                dicts[product_id][False] = qty
    # Flatten the per-product/per-lot mapping into consume line dicts.
    consume_lines = []
    for prod in dicts.keys():
        for lot, qty in dicts[prod].items():
            consume_lines.append({'product_id': prod, 'product_qty': qty, 'lot_id': lot})
    return consume_lines
def action_produce(self, cr, uid, production_id, production_qty, production_mode, wiz=False, context=None):
    """ To produce final product based on production mode (consume/consume&produce).
    If Production mode is consume, all stock move lines of raw materials will be done/consumed.
    If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed
    and stock move lines of final product will be also done/produced.
    @param production_id: the ID of mrp.production object
    @param production_qty: specify qty to produce in the uom of the production order
    @param production_mode: specify production mode (consume/consume&produce).
    @param wiz: the mrp produce product wizard, which will tell the amount of consumed products needed
    @return: True
    """
    stock_mov_obj = self.pool.get('stock.move')
    uom_obj = self.pool.get("product.uom")
    production = self.browse(cr, uid, production_id, context=context)
    # Quantity to produce, converted into the product's own UoM.
    production_qty_uom = uom_obj._compute_qty(cr, uid, production.product_uom.id, production_qty, production.product_id.uom_id.id)
    precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')

    # Move of the main finished product; consumption moves are linked to
    # it through consumed_for below.
    main_production_move = False
    if production_mode == 'consume_produce':
        # To produce remaining qty of final product
        # (produced_products is computed but only the moves below are used;
        #  kept as-is to preserve behavior)
        produced_products = {}
        for produced_product in production.move_created_ids2:
            if produced_product.scrapped:
                continue
            if not produced_products.get(produced_product.product_id.id, False):
                produced_products[produced_product.product_id.id] = 0
            produced_products[produced_product.product_id.id] += produced_product.product_qty
        for produce_product in production.move_created_ids:
            # Factor is 1 unless mrp_subproduct overrides it for by-products.
            subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context)
            lot_id = False
            if wiz:
                lot_id = wiz.lot_id.id
            qty = min(subproduct_factor * production_qty_uom, produce_product.product_qty) #Needed when producing more than maximum quantity
            new_moves = stock_mov_obj.action_consume(cr, uid, [produce_product.id], qty,
                                                     location_id=produce_product.location_id.id, restrict_lot_id=lot_id, context=context)
            stock_mov_obj.write(cr, uid, new_moves, {'production_id': production_id}, context=context)
            remaining_qty = subproduct_factor * production_qty_uom - qty
            if not float_is_zero(remaining_qty, precision_digits=precision):
                # In case you need to make more than planned:
                # create, confirm and immediately finish an extra move.
                extra_move_id = stock_mov_obj.copy(cr, uid, produce_product.id, default={'product_uom_qty': remaining_qty,
                                                                                        'production_id': production_id}, context=context)
                stock_mov_obj.action_confirm(cr, uid, [extra_move_id], context=context)
                stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)
            if produce_product.product_id.id == production.product_id.id:
                main_production_move = produce_product.id

    if production_mode in ['consume', 'consume_produce']:
        # Consume lines come either from the wizard or are derived from
        # the scheduled quantities (see _calculate_qty).
        if wiz:
            consume_lines = []
            for cons in wiz.consume_lines:
                consume_lines.append({'product_id': cons.product_id.id, 'lot_id': cons.lot_id.id, 'product_qty': cons.product_qty})
        else:
            consume_lines = self._calculate_qty(cr, uid, production, production_qty_uom, context=context)
        for consume in consume_lines:
            remaining_qty = consume['product_qty']
            # Spread the consumption over the pending raw material moves
            # of the matching product.
            for raw_material_line in production.move_lines:
                if raw_material_line.state in ('done', 'cancel'):
                    continue
                if remaining_qty <= 0:
                    break
                if consume['product_id'] != raw_material_line.product_id.id:
                    continue
                consumed_qty = min(remaining_qty, raw_material_line.product_qty)
                stock_mov_obj.action_consume(cr, uid, [raw_material_line.id], consumed_qty, raw_material_line.location_id.id,
                                             restrict_lot_id=consume['lot_id'], consumed_for=main_production_move, context=context)
                remaining_qty -= consumed_qty
            if not float_is_zero(remaining_qty, precision_digits=precision):
                # consumed more in wizard than previously planned:
                # create an extra consumption move and finish it directly.
                product = self.pool.get('product.product').browse(cr, uid, consume['product_id'], context=context)
                extra_move_id = self._make_consume_line_from_data(cr, uid, production, product, product.uom_id.id, remaining_qty, False, 0, context=context)
                stock_mov_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': consume['lot_id'],
                                                               'consumed_for': main_production_move}, context=context)
                stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)

    self.message_post(cr, uid, production_id, body=_("%s produced") % self._description, context=context)

    # Remove remaining products to consume if no more products to produce
    # NOTE(review): this checks the browse record fetched before producing;
    # presumably the cache reflects the consumed moves — confirm.
    if not production.move_created_ids and production.move_lines:
        stock_mov_obj.action_cancel(cr, uid, [x.id for x in production.move_lines], context=context)

    self.signal_workflow(cr, uid, [production_id], 'button_produce_done')
    return True
def _costs_generate(self, cr, uid, production):
    """ Creates analytic accounting lines for the work center costs
    (per hour and per cycle) of a finished production.
    @param production: mrp.production browse record (not an id).
    @return: Calculated total amount booked on analytic accounts.
    """
    amount = 0.0
    analytic_line_obj = self.pool.get('account.analytic.line')
    for wc_line in production.workcenter_lines:
        wc = wc_line.workcenter_id
        # Costs are only booked for work centers fully configured for
        # analytic accounting.
        if wc.costs_journal_id and wc.costs_general_account_id:
            # Cost per hour
            value = wc_line.hour * wc.costs_hour
            account = wc.costs_hour_account_id.id
            if value and account:
                amount += value
                # we use SUPERUSER_ID as we do not guarantee an mrp user
                # has access to account analytic lines but still should be
                # able to produce orders
                analytic_line_obj.create(cr, SUPERUSER_ID, {
                    'name': wc_line.name + ' (H)',
                    'amount': value,
                    'account_id': account,
                    'general_account_id': wc.costs_general_account_id.id,
                    'journal_id': wc.costs_journal_id.id,
                    'ref': wc.code,
                    'product_id': wc.product_id.id,
                    'unit_amount': wc_line.hour,
                    'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
                })
            # Cost per cycle
            value = wc_line.cycle * wc.costs_cycle
            account = wc.costs_cycle_account_id.id
            if value and account:
                amount += value
                analytic_line_obj.create(cr, SUPERUSER_ID, {
                    'name': wc_line.name + ' (C)',
                    'amount': value,
                    'account_id': account,
                    'general_account_id': wc.costs_general_account_id.id,
                    'journal_id': wc.costs_journal_id.id,
                    'ref': wc.code,
                    'product_id': wc.product_id.id,
                    'unit_amount': wc_line.cycle,
                    'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
                })
    return amount
def action_in_production(self, cr, uid, ids, context=None):
    """Move the orders to the In Production state, stamping the start
    date with the current time.
    @return: result of the write
    """
    vals = {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}
    return self.write(cr, uid, ids, vals)
def consume_lines_get(self, cr, uid, ids, *args):
    """Collect the ids of every raw-material move of the given orders."""
    res = []
    for order in self.browse(cr, uid, ids, context={}):
        res.extend(move.id for move in order.move_lines)
    return res
def test_ready(self, cr, uid, ids):
    """Workflow test: can the orders start?

    An order is ready when it has no raw material moves at all, or when
    its ready_production flag is set.
    @return: True or False
    """
    return all(
        not production.move_lines or production.ready_production
        for production in self.browse(cr, uid, ids)
    )
def _make_production_produce_line(self, cr, uid, production, context=None):
    """Create and confirm the stock move for the finished product.

    The move goes from the product's virtual production location to the
    order's destination location, and is linked back to the procurement
    that generated the order (if any).
    @param production: mrp.production browse record
    @return: id of the newly created, confirmed stock move
    """
    stock_move = self.pool.get('stock.move')
    proc_obj = self.pool.get('procurement.order')
    # The finished product comes out of the virtual production location.
    source_location_id = production.product_id.property_stock_production.id
    destination_location_id = production.location_dest_id.id
    # Procurement that generated this production order, if any.
    procs = proc_obj.search(cr, uid, [('production_id', '=', production.id)], context=context)
    procurement = procs and\
        proc_obj.browse(cr, uid, procs[0], context=context) or False
    data = {
        'name': production.name,
        'date': production.date_planned,
        'product_id': production.product_id.id,
        'product_uom': production.product_uom.id,
        'product_uom_qty': production.product_qty,
        'product_uos_qty': production.product_uos and production.product_uos_qty or False,
        'product_uos': production.product_uos and production.product_uos.id or False,
        'location_id': source_location_id,
        'location_dest_id': destination_location_id,
        'move_dest_id': production.move_prod_id.id,
        'procurement_id': procurement and procurement.id,
        'company_id': production.company_id.id,
        'production_id': production.id,
        'origin': production.name,
        'group_id': procurement and procurement.group_id.id,
    }
    move_id = stock_move.create(cr, uid, data, context=context)
    # a phantom bom cannot be used in mrp order so it's ok to assume the
    # list returned by action_confirm is 1 element long, so we can take
    # the first.
    return stock_move.action_confirm(cr, uid, [move_id], context=context)[0]
def _get_raw_material_procure_method(self, cr, uid, product, location_id=False, location_dest_id=False, context=None):
    '''Return the procure_method ('make_to_order'/'make_to_stock') to use
    when creating the stock move for the production raw materials.

    A procurement rule matching one of the product's routes and the exact
    source/destination pair wins.  Otherwise the product is made to order
    when it carries the warehouse MTO route, and made to stock by default.
    Besides the standard configuration of looking if the product or product
    category has the MTO route, you can also define a rule e.g. from Stock
    to Production (which might be used in the future like the sale orders).
    '''
    warehouse_obj = self.pool['stock.warehouse']
    routes = product.route_ids + product.categ_id.total_route_ids
    if location_id and location_dest_id:
        pull_obj = self.pool['procurement.rule']
        pulls = pull_obj.search(cr, uid, [('route_id', 'in', [x.id for x in routes]),
                                          ('location_id', '=', location_dest_id),
                                          ('location_src_id', '=', location_id)], limit=1, context=context)
        if pulls:
            return pull_obj.browse(cr, uid, pulls[0], context=context).procure_method
    try:
        mto_route = warehouse_obj._get_mto_route(cr, uid, context=context)
    except Exception:
        # Fix: catch Exception instead of a bare except, which would also
        # swallow KeyboardInterrupt/SystemExit.  A missing MTO route simply
        # means we fall back to make_to_stock.
        return "make_to_stock"
    if mto_route in [x.id for x in routes]:
        return "make_to_order"
    return "make_to_stock"
def _create_previous_move(self, cr, uid, move_id, product, source_location_id, dest_location_id, context=None):
    '''
    When the routing gives a different location than the raw material location of the production order,
    we should create an extra move from the raw material location to the location of the routing, which
    precedes the consumption line (chained). The picking type depends on the warehouse in which this happens
    and the type of locations.

    :param move_id: id of the consumption move the new move will feed
    :param product: product.product browse record being moved
    :return: id of the newly copied preceding stock move
    '''
    loc_obj = self.pool.get("stock.location")
    stock_move = self.pool.get('stock.move')
    type_obj = self.pool.get('stock.picking.type')
    # Need to search for a picking type
    move = stock_move.browse(cr, uid, move_id, context=context)
    src_loc = loc_obj.browse(cr, uid, source_location_id, context=context)
    dest_loc = loc_obj.browse(cr, uid, dest_location_id, context=context)
    code = stock_move.get_code_from_locs(cr, uid, move, src_loc, dest_loc, context=context)
    # Resolve the warehouse on the internal side of the transfer.
    if code == 'outgoing':
        check_loc = src_loc
    else:
        check_loc = dest_loc
    wh = loc_obj.get_warehouse(cr, uid, check_loc, context=context)
    domain = [('code', '=', code)]
    if wh:
        domain += [('warehouse_id', '=', wh)]
    types = type_obj.search(cr, uid, domain, context=context)
    move = stock_move.copy(cr, uid, move_id, default = {
        'location_id': source_location_id,
        'location_dest_id': dest_location_id,
        'procure_method': self._get_raw_material_procure_method(cr, uid, product, location_id=source_location_id,
                                                                location_dest_id=dest_location_id, context=context),
        # The copy is not itself a raw material line; it is chained in
        # front of the consumption move via move_dest_id.
        'raw_material_production_id': False,
        'move_dest_id': move_id,
        'picking_type_id': types and types[0] or False,
    }, context=context)
    return move
def _make_consume_line_from_data(self, cr, uid, production, product, uom_id, qty, uos_id, uos_qty, context=None):
    """Create the raw-material consumption stock move for one product.

    The move goes from the order's source location (or the routing's
    location, when it differs) to the product's virtual production
    location.  When the routing location differs, a preceding chained
    move is created and confirmed as well (see _create_previous_move).
    @return: id of the consumption move, or False for products that are
        neither stockable nor consumable (e.g. services)
    """
    stock_move = self.pool.get('stock.move')
    loc_obj = self.pool.get('stock.location')
    # Internal shipment is created for Stockable and Consumer Products
    if product.type not in ('product', 'consu'):
        return False
    # Take routing location as a Source Location.
    source_location_id = production.location_src_id.id
    prod_location_id = source_location_id
    prev_move = False
    if production.bom_id.routing_id and production.bom_id.routing_id.location_id and production.bom_id.routing_id.location_id.id != source_location_id:
        source_location_id = production.bom_id.routing_id.location_id.id
        prev_move = True
    destination_location_id = production.product_id.property_stock_production.id
    move_id = stock_move.create(cr, uid, {
        'name': production.name,
        'date': production.date_planned,
        'product_id': product.id,
        'product_uom_qty': qty,
        'product_uom': uom_id,
        'product_uos_qty': uos_id and uos_qty or False,
        'product_uos': uos_id or False,
        'location_id': source_location_id,
        'location_dest_id': destination_location_id,
        'company_id': production.company_id.id,
        'procure_method': prev_move and 'make_to_stock' or self._get_raw_material_procure_method(cr, uid, product, location_id=source_location_id,
                                                                                                location_dest_id=destination_location_id, context=context), #Make_to_stock avoids creating procurement
        'raw_material_production_id': production.id,
        #this saves us a browse in create()
        'price_unit': product.standard_price,
        'origin': production.name,
        'warehouse_id': loc_obj.get_warehouse(cr, uid, production.location_src_id, context=context),
        'group_id': production.move_prod_id.group_id.id,
    }, context=context)
    if prev_move:
        prev_move = self._create_previous_move(cr, uid, move_id, product, prod_location_id, source_location_id, context=context)
        stock_move.action_confirm(cr, uid, [prev_move], context=context)
    return move_id
def _make_production_consume_line(self, cr, uid, line, context=None):
    """Create the raw-material consumption move for one scheduled line."""
    return self._make_consume_line_from_data(
        cr, uid, line.production_id, line.product_id, line.product_uom.id,
        line.product_qty, line.product_uos.id, line.product_uos_qty,
        context=context)
def _make_service_procurement(self, cr, uid, line, context=None):
    """Create and run a procurement for a service-type scheduled line.

    Nothing happens when the product does not need procurement.
    """
    prod_obj = self.pool.get('product.product')
    if not prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context):
        return
    production = line.production_id
    vals = {
        'name': production.name,
        'origin': production.name,
        'company_id': production.company_id.id,
        'date_planned': production.date_planned,
        'product_id': line.product_id.id,
        'product_qty': line.product_qty,
        'product_uom': line.product_uom.id,
        'product_uos_qty': line.product_uos_qty,
        'product_uos': line.product_uos.id,
    }
    proc_obj = self.pool.get("procurement.order")
    proc_id = proc_obj.create(cr, uid, vals, context=context)
    proc_obj.run(cr, uid, [proc_id], context=context)
def action_confirm(self, cr, uid, ids, context=None):
    """ Confirms production order.

    Missing scheduled lines are computed first; then the finished-product
    move and one consumption move per non-service scheduled line are
    created and confirmed, while service lines get a procurement.
    @return: 0 (kept as-is for backward compatibility with callers)
    """
    user_lang = self.pool.get('res.users').browse(cr, uid, [uid]).partner_id.lang
    # Fix: guard against context=None (the default) -- dict(None, ...)
    # raises TypeError.
    context = dict(context or {}, lang=user_lang)
    # Orders whose scheduled products were never computed.
    uncompute_ids = filter(lambda x: x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)])
    self.action_compute(cr, uid, uncompute_ids, context=context)
    for production in self.browse(cr, uid, ids, context=context):
        self._make_production_produce_line(cr, uid, production, context=context)
        stock_moves = []
        for line in production.product_lines:
            if line.product_id.type != 'service':
                stock_move_id = self._make_production_consume_line(cr, uid, line, context=context)
                stock_moves.append(stock_move_id)
            else:
                self._make_service_procurement(cr, uid, line, context=context)
        if stock_moves:
            self.pool.get('stock.move').action_confirm(cr, uid, stock_moves, context=context)
        production.write({'state': 'confirmed'})
    return 0
def action_assign(self, cr, uid, ids, context=None):
    """Reserve the raw materials of the given orders, firing the
    'moves_ready' workflow signal on every order that became ready.
    """
    from openerp import workflow
    move_obj = self.pool.get("stock.move")
    for production in self.browse(cr, uid, ids, context=context):
        raw_move_ids = [move.id for move in production.move_lines]
        move_obj.action_assign(cr, uid, raw_move_ids, context=context)
        if self.pool.get('mrp.production').test_ready(cr, uid, [production.id]):
            workflow.trg_validate(uid, 'mrp.production', production.id, 'moves_ready', cr)
def force_production(self, cr, uid, ids, *args):
    """Force-reserve the raw materials of the given orders, firing the
    'moves_ready' workflow signal on every order that became ready.
    @param *args: Arguments
    @return: True
    """
    from openerp import workflow
    move_obj = self.pool.get('stock.move')
    for order in self.browse(cr, uid, ids):
        raw_move_ids = [move.id for move in order.move_lines]
        move_obj.force_assign(cr, uid, raw_move_ids)
        if self.pool.get('mrp.production').test_ready(cr, uid, [order.id]):
            workflow.trg_validate(uid, 'mrp.production', order.id, 'moves_ready', cr)
    return True
class mrp_production_workcenter_line(osv.osv):
    """Work order line: the hours/cycles planned on one work center for a
    manufacturing order.  hour and cycle are valued into analytic lines
    by mrp_production._costs_generate at production end.
    """
    _name = 'mrp.production.workcenter.line'
    _description = 'Work Order'
    _order = 'sequence'
    _inherit = ['mail.thread']

    _columns = {
        'name': fields.char('Work Order', required=True),
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
        # Planned effort, used for work center cost booking.
        'cycle': fields.float('Number of Cycles', digits=(16, 2)),
        'hour': fields.float('Number of Hours', digits=(16, 2)),
        'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."),
        # Cascade delete: work orders disappear with their production order.
        'production_id': fields.many2one('mrp.production', 'Manufacturing Order',
            track_visibility='onchange', select=True, ondelete='cascade', required=True),
    }
    _defaults = {
        'sequence': lambda *a: 1,
        'hour': lambda *a: 0,
        'cycle': lambda *a: 0,
    }
class mrp_production_product_line(osv.osv):
    """Scheduled product line: one raw material requirement computed from
    the BoM for a manufacturing order (see _action_compute_lines).
    """
    _name = 'mrp.production.product.line'
    _description = 'Production Scheduled Product'
    _columns = {
        'name': fields.char('Name', required=True),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        # Optional secondary unit-of-sale quantity/unit.
        'product_uos_qty': fields.float('Product UOS Quantity'),
        'product_uos': fields.many2one('product.uom', 'Product UOS'),
        'production_id': fields.many2one('mrp.production', 'Production Order', select=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yufengg/tensorflow | tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py | 10 | 20568 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTest(test.TestCase):
    """Tests for TextLineDataset: reading newline-delimited text files."""

    def _lineText(self, f, l):
        # Deterministic payload for line `l` of file `f`.
        return compat.as_bytes("%d: %d" % (f, l))

    def _createFiles(self, num_files, num_lines, crlf=False):
        """Write `num_files` text files of `num_lines` lines each and
        return their paths.  `crlf` selects CRLF instead of LF endings.
        """
        filenames = []
        for i in range(num_files):
            fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
            filenames.append(fn)
            with open(fn, "wb") as f:
                for j in range(num_lines):
                    f.write(self._lineText(i, j))
                    # Always include a newline after the record unless it is
                    # at the end of the file, in which case we include it sometimes.
                    if j + 1 != num_lines or i == 0:
                        f.write(b"\r\n" if crlf else b"\n")
        return filenames

    def testTextLineDataset(self):
        test_filenames = self._createFiles(2, 5, crlf=True)
        # Placeholders let a single graph drive every scenario below.
        filenames = array_ops.placeholder(dtypes.string, shape=[None])
        num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
        batch_size = array_ops.placeholder(dtypes.int64, shape=[])

        repeat_dataset = dataset_ops.TextLineDataset(filenames).repeat(num_epochs)
        batch_dataset = repeat_dataset.batch(batch_size)

        # One reinitializable iterator serves both the batched and the
        # unbatched pipeline (they share output types).
        iterator = dataset_ops.Iterator.from_structure(batch_dataset.output_types)
        init_op = iterator.make_initializer(repeat_dataset)
        init_batch_op = iterator.make_initializer(batch_dataset)
        get_next = iterator.get_next()

        with self.test_session() as sess:
            # Basic test: read from file 0.
            sess.run(init_op, feed_dict={filenames: [test_filenames[0]],
                                         num_epochs: 1})
            for i in range(5):
                self.assertEqual(self._lineText(0, i), sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Basic test: read from file 1.
            sess.run(init_op, feed_dict={filenames: [test_filenames[1]],
                                         num_epochs: 1})
            for i in range(5):
                self.assertEqual(self._lineText(1, i), sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Basic test: read from both files.
            sess.run(init_op, feed_dict={filenames: test_filenames,
                                         num_epochs: 1})
            for j in range(2):
                for i in range(5):
                    self.assertEqual(self._lineText(j, i), sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Test repeated iteration through both files.
            sess.run(init_op, feed_dict={filenames: test_filenames,
                                         num_epochs: 10})
            for _ in range(10):
                for j in range(2):
                    for i in range(5):
                        self.assertEqual(self._lineText(j, i), sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)

            # Test batched and repeated iteration through both files.
            sess.run(init_batch_op, feed_dict={filenames: test_filenames,
                                               num_epochs: 10,
                                               batch_size: 5})
            for _ in range(10):
                self.assertAllEqual([self._lineText(0, i) for i in range(5)],
                                    sess.run(get_next))
                self.assertAllEqual([self._lineText(1, i) for i in range(5)],
                                    sess.run(get_next))
class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
    """Configure the fixture geometry shared by every test: two files of
    seven 3-byte records each, wrapped in a 5-byte header and a 2-byte
    footer.
    """
    super(FixedLengthRecordReaderTest, self).setUp()
    self._num_files = 2       # number of test files to generate
    self._num_records = 7     # records per file
    self._header_bytes = 5    # leading bytes to skip in each file
    self._record_bytes = 3    # fixed payload size of one record
    self._footer_bytes = 2    # trailing bytes to skip in each file
def _record(self, f, r):
    """Return the payload of record `r` in file `f`: the digit string of
    (f * 2 + r), repeated self._record_bytes times, as bytes.
    """
    digits = str(f * 2 + r)
    return compat.as_bytes(digits * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def testFixedLengthRecordDataset(self):
test_filenames = self._createFiles()
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = (dataset_ops.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
batch_dataset = repeat_dataset.batch(batch_size)
iterator = dataset_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames,
num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames,
num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(init_batch_op, feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: self._num_records})
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual([self._record(j, i)
for i in range(self._num_records)],
sess.run(get_next))
class TFRecordDatasetTest(test.TestCase):
  """Tests for `TFRecordDataset`, including ZLIB- and GZIP-compressed input."""

  def setUp(self):
    super(TFRecordDatasetTest, self).setUp()
    self._num_files = 2
    self._num_records = 7

    self.test_filenames = self._createFiles()

    # The graph is built once here; each test feeds the placeholders and
    # runs one of the two iterator initializers.
    self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
    self.num_epochs = array_ops.placeholder_with_default(
        constant_op.constant(1, dtypes.int64), shape=[])
    self.compression_type = array_ops.placeholder_with_default("", shape=[])
    self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])

    repeat_dataset = dataset_ops.TFRecordDataset(
        self.filenames, self.compression_type).repeat(self.num_epochs)
    batch_dataset = repeat_dataset.batch(self.batch_size)

    iterator = dataset_ops.Iterator.from_structure(batch_dataset.output_types)
    self.init_op = iterator.make_initializer(repeat_dataset)
    self.init_batch_op = iterator.make_initializer(batch_dataset)
    self.get_next = iterator.get_next()

  def _record(self, f, r):
    # Deterministic payload identifying record `r` of file `f`.
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _createFiles(self):
    """Write `_num_files` TFRecord files and return their paths."""
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      writer = python_io.TFRecordWriter(fn)
      for j in range(self._num_records):
        writer.write(self._record(i, j))
      writer.close()
    return filenames

  def testReadOneEpoch(self):
    with self.test_session() as sess:
      # Basic test: read from file 0.
      sess.run(self.init_op,
               feed_dict={self.filenames: [self.test_filenames[0]],
                          self.num_epochs: 1})
      for i in range(self._num_records):
        self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

      # Basic test: read from file 1.
      sess.run(self.init_op,
               feed_dict={self.filenames: [self.test_filenames[1]],
                          self.num_epochs: 1})
      for i in range(self._num_records):
        self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

      # Basic test: read from both files.
      sess.run(self.init_op,
               feed_dict={self.filenames: self.test_filenames,
                          self.num_epochs: 1})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

  def testReadTenEpochs(self):
    with self.test_session() as sess:
      sess.run(self.init_op, feed_dict={self.filenames: self.test_filenames,
                                        self.num_epochs: 10})
      for _ in range(10):
        for j in range(self._num_files):
          for i in range(self._num_records):
            self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

  def testReadTenEpochsOfBatches(self):
    with self.test_session() as sess:
      sess.run(self.init_batch_op,
               feed_dict={self.filenames: self.test_filenames,
                          self.num_epochs: 10,
                          self.batch_size: self._num_records})
      for _ in range(10):
        for j in range(self._num_files):
          values = sess.run(self.get_next)
          self.assertAllEqual([self._record(j, i)
                               for i in range(self._num_records)], values)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

  def testReadZlibFiles(self):
    # Re-compress the fixture files with raw zlib and read them back with
    # compression_type="ZLIB".
    zlib_files = []
    for i, fn in enumerate(self.test_filenames):
      with open(fn, "rb") as f:
        cdata = zlib.compress(f.read())
        zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
        with open(zfn, "wb") as f:
          f.write(cdata)
        zlib_files.append(zfn)

    with self.test_session() as sess:
      sess.run(self.init_op,
               feed_dict={self.filenames: zlib_files,
                          self.compression_type: "ZLIB"})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

  def testReadGzipFiles(self):
    # Same as above, but gzip-framed with compression_type="GZIP".
    gzip_files = []
    for i, fn in enumerate(self.test_filenames):
      with open(fn, "rb") as f:
        gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
        with gzip.GzipFile(gzfn, "wb") as gzf:
          gzf.write(f.read())
        gzip_files.append(gzfn)

    with self.test_session() as sess:
      sess.run(self.init_op,
               feed_dict={self.filenames: gzip_files,
                          self.compression_type: "GZIP"})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)
class ReadBatchFeaturesTest(test.TestCase):
  """Tests for `read_batch_features` over TFRecord files of tf.Examples."""

  def setUp(self):
    super(ReadBatchFeaturesTest, self).setUp()
    self._num_files = 2
    self._num_records = 7
    self.test_filenames = self._createFiles()

  def _read_batch_features(self, filenames, num_epochs, batch_size):
    """Build and return the read_batch_features output dict for the inputs."""
    self.filenames = filenames
    self.num_epochs = num_epochs
    self.batch_size = batch_size

    return dataset_ops.read_batch_features(
        file_pattern=self.filenames,
        batch_size=self.batch_size,
        features={
            "file": parsing_ops.FixedLenFeature([], dtypes.int64),
            "record": parsing_ops.FixedLenFeature([], dtypes.int64),
            "keywords": parsing_ops.VarLenFeature(dtypes.string)
        },
        reader=dataset_ops.TFRecordDataset,
        # Deterministic order so _next_expected_batch can predict output.
        randomize_input=False,
        num_epochs=self.num_epochs)

  def _record(self, f, r):
    # Serialized tf.Example identifying (file, record) plus its keywords.
    example = example_pb2.Example(features=feature_pb2.Features(
        feature={
            "file":
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[f])),
            "record":
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[r])),
            "keywords":
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=self._get_keywords(f, r)))
        }))
    return example.SerializeToString()

  def _get_keywords(self, f, r):
    # One or two keywords depending on the parity of f + r, so the sparse
    # "keywords" feature has a varying length per example.
    num_keywords = 1 + (f + r) % 2
    keywords = []
    for index in range(num_keywords):
      keywords.append(compat.as_bytes("keyword%d" % index))
    return keywords

  def _createFiles(self):
    """Write `_num_files` TFRecord files of Examples; return their paths."""
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      writer = python_io.TFRecordWriter(fn)
      for j in range(self._num_records):
        writer.write(self._record(i, j))
      writer.close()
    return filenames

  def _next_actual_batch(self, sess):
    """Evaluate one batch of outputs; returns [file, kw_indices, kw_values,
    kw_dense_shape, record] matching _next_expected_batch's layout."""
    file_op = self.outputs["file"]
    keywords_indices_op = self.outputs["keywords"].indices
    keywords_values_op = self.outputs["keywords"].values
    keywords_dense_shape_op = self.outputs["keywords"].dense_shape
    record_op = self.outputs["record"]
    return sess.run([
        file_op, keywords_indices_op, keywords_values_op,
        keywords_dense_shape_op, record_op
    ])

  def _next_expected_batch(self, file_indices, batch_size, num_epochs):
    """Yield expected batches in the same layout as `_next_actual_batch`."""

    def _next_record(file_indices):
      # Enumerate (file, record) pairs in on-disk order.
      for j in file_indices:
        for i in range(self._num_records):
          yield j, i

    file_batch = []
    keywords_batch_indices = []
    keywords_batch_values = []
    keywords_batch_max_len = 0
    record_batch = []
    batch_index = 0
    for _ in range(num_epochs):
      for record in _next_record(file_indices):
        f = record[0]
        r = record[1]
        file_batch.append(f)
        record_batch.append(r)
        keywords = self._get_keywords(f, r)
        keywords_batch_values.extend(keywords)
        # Sparse indices: one [row, position] pair per keyword.
        keywords_batch_indices.extend([[batch_index, i]
                                       for i in range(len(keywords))])
        batch_index += 1
        keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
        if len(file_batch) == batch_size:
          yield [
              file_batch, keywords_batch_indices, keywords_batch_values,
              [batch_size, keywords_batch_max_len], record_batch
          ]
          file_batch = []
          keywords_batch_indices = []
          keywords_batch_values = []
          keywords_batch_max_len = 0
          record_batch = []
          batch_index = 0
    # Final, possibly short, batch.
    if file_batch:
      yield [
          file_batch, keywords_batch_indices, keywords_batch_values,
          [len(file_batch), keywords_batch_max_len], record_batch
      ]

  def _verify_records(self, sess, batch_size, file_index=None, num_epochs=1):
    """Compare actual batches against expected ones for the given files."""
    if file_index is not None:
      file_indices = [file_index]
    else:
      file_indices = range(self._num_files)

    for expected_batch in self._next_expected_batch(file_indices, batch_size,
                                                    num_epochs):
      actual_batch = self._next_actual_batch(sess)
      for i in range(len(expected_batch)):
        self.assertAllEqual(expected_batch[i], actual_batch[i])

  def testRead(self):
    for batch_size in [1, 2]:
      for num_epochs in [1, 10]:
        # A fresh graph per sub-case since _read_batch_features builds ops.
        with ops.Graph().as_default():
          with self.test_session(graph=ops.get_default_graph()) as sess:
            # Basic test: read from file 0.
            self.outputs = self._read_batch_features(
                filenames=self.test_filenames[0],
                num_epochs=num_epochs,
                batch_size=batch_size)
            self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
            with self.assertRaises(errors.OutOfRangeError):
              self._next_actual_batch(sess)

        with ops.Graph().as_default():
          with self.test_session(graph=ops.get_default_graph()) as sess:
            # Basic test: read from file 1.
            self.outputs = self._read_batch_features(
                filenames=self.test_filenames[1],
                num_epochs=num_epochs,
                batch_size=batch_size)
            self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
            with self.assertRaises(errors.OutOfRangeError):
              self._next_actual_batch(sess)

        with ops.Graph().as_default():
          with self.test_session(graph=ops.get_default_graph()) as sess:
            # Basic test: read from both files.
            self.outputs = self._read_batch_features(
                filenames=self.test_filenames,
                num_epochs=num_epochs,
                batch_size=batch_size)
            self._verify_records(sess, batch_size, num_epochs=num_epochs)
            with self.assertRaises(errors.OutOfRangeError):
              self._next_actual_batch(sess)

  def testReadWithEquivalentDataset(self):
    # TODO(mrry): Add support for tf.SparseTensor as a Dataset component.
    features = {
        "file": parsing_ops.FixedLenFeature([], dtypes.int64),
        "record": parsing_ops.FixedLenFeature([], dtypes.int64),
    }
    dataset = (dataset_ops.TFRecordDataset(self.test_filenames)
               .map(lambda x: parsing_ops.parse_single_example(x, features))
               .repeat(10)
               .batch(2))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    next_element = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      # Only the dense features are compared; sparse parts are ignored.
      for file_batch, _, _, _, record_batch in self._next_expected_batch(
          range(self._num_files), 2, 10):
        actual_batch = sess.run(next_element)
        self.assertAllEqual(file_batch, actual_batch["file"])
        self.assertAllEqual(record_batch, actual_batch["record"])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
if __name__ == "__main__":
  # Run every test case defined in this module.
  test.main()
| apache-2.0 |
justathoughtor2/atomicApe | cygwin/lib/python2.7/site-packages/pylint/test/input/func_typecheck_callfunc_assigment.py | 7 | 1496 |
"""check assignment to function call where the function doesn't return
'E1111': ('Assigning to function call which doesn\'t return',
'Used when an assignment is done on a function call but the \
infered function doesn\'t return anything.'),
'W1111': ('Assigning to function call which only returns None',
'Used when an assignment is done on a function call but the \
infered function returns nothing but None.'),
"""
from __future__ import generators, print_function
#pylint: disable=redefined-variable-type
def func_no_return():
    """function without return"""
    print('dougloup')
A = func_no_return()  # expects E1111: function never returns a value
def func_return_none():
    """function returning none"""
    print('dougloup')
    return None
A = func_return_none()  # expects W1111: function only ever returns None
def func_implicit_return_none():
    """Function returning None from bare return statement."""
    return
A = func_implicit_return_none()  # expects W1111: bare return yields None
def func_return_none_and_smth():
    """function returning none and something else"""
    print('dougloup')
    if 2 or 3:
        return None
    return 3
A = func_return_none_and_smth()  # no message: a non-None return path exists
def generator():
    """no problemo"""
    yield 2
A = generator()  # no message: calling a generator function returns an iterator
class Abstract(object):
    """bla bla"""
    def abstract_method(self):
        """use to return something in concrete implementation"""
        raise NotImplementedError
    def use_abstract(self):
        """should not issue E1111"""
        var = self.abstract_method()
        print(var)
| gpl-3.0 |
li-xiao-nan/gyp_tools | test/win/gyptest-link-safeseh.py | 32 | 1340 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure safeseh setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('safeseh.gyp', chdir=CHDIR)
  test.build('safeseh.gyp', test.ALL, chdir=CHDIR)

  def HasSafeExceptionHandlers(exe):
    # dumpbin /LOADCONFIG prints a "Safe Exception Handler Table" section
    # only when the image was linked with /SAFESEH.
    full_path = test.built_file_path(exe, chdir=CHDIR)
    output = test.run_dumpbin('/LOADCONFIG', full_path)
    return ' Safe Exception Handler Table' in output

  # From MSDN: http://msdn.microsoft.com/en-us/library/9a89h429.aspx
  # If /SAFESEH is not specified, the linker will produce an image with a
  # table of safe exceptions handlers if all modules are compatible with
  # the safe exception handling feature. If any modules were not
  # compatible with safe exception handling feature, the resulting image
  # will not contain a table of safe exception handlers.
  if HasSafeExceptionHandlers('test_safeseh_default.exe'):
    test.fail_test()
  if HasSafeExceptionHandlers('test_safeseh_no.exe'):
    test.fail_test()
  if not HasSafeExceptionHandlers('test_safeseh_yes.exe'):
    test.fail_test()

  test.pass_test()
| bsd-3-clause |
trishume/linux-track | python_prototype/tir4_read_version_string.py | 7 | 3222 | #! /usr/bin/python
# -*- coding: utf-8 -*-
##bc##################################################################
## (C) Copyright 2009, All Rights Reserved.
##
## Name : tir4_read_version_string.py
## Author : DT Austin
## Created : 07/14/2009
## SVN date : $Date$
##
######################################################################
## Description: sends a request version message and
## prints the result to stdout
##ec##################################################################
import sys
from optparse import OptionParser
import time
try:
import tir4
except:
print("tir4.py missing")
sys.exit(1)
class GeneralException(Exception):
    """Application-level error carrying a human-readable message."""

    def __init__(self, msg):
        # Kept on the instance so callers can display it directly.
        self.msg = msg
def handle_exception(errorinfo):
if errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.USB_LIST_FAILED:
print "Unable to obtain a list the USB devices.\nApplication will close."
sys.exit(1)
elif errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.FIND_DEVICE_FAILED:
print "Unable to find TIR4 device.\nApplication will close."
sys.exit(1)
elif errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.CREATE_HANDLE_FAILED:
print "Unable to create a handle for the TIR4 device.\nApplication will close."
sys.exit(1)
elif errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.CLAIM_FAILED:
print "Unable to claim the TIR4 device.\nThis is most likely a USB permissions problem.\nRunning this app sudo may be a quick fix."
sys.exit(1)
elif errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.DISCONNECT:
print "TIR4 device disconnected.\nApplication will close."
sys.exit(1)
elif errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.UNKNOWN_READ_ERROR:
print "Unknown usb read error.\nApplication will close."
sys.exit(1)
elif errorinfo.args[0] == tir4.TIR4EXCEPTION_ENUM.UNKNOWN_PACKET:
print "Unknown usb read error.\nApplication will close."
sys.exit(1)
def main(argv=None):
usage = "usage: %prog [options] [filename]"
parser = OptionParser(usage=usage)
parser.add_option('--verbose', '-v',
help ='print debugging output',
action = 'store_true')
try:
(options, args) = parser.parse_args(argv)
except:
raise GeneralOptionParser("Usage:")
try:
t4d = tir4.TIR4Control()
if not t4d.is_device_present():
print "Unable to find TIR4 device."
sys.exit(1)
done = False
while not(done):
t4d.do_init_step_start()
t4d.nq_write_usb((0x12,))
time.sleep(0.1)
t4d.nq_write_usb((0x14,0x01))
time.sleep(0.1)
t4d.nq_write_usb((0x12,))
time.sleep(0.1)
t4d.nq_write_usb((0x13,))
time.sleep(0.1)
t4d.nq_write_usb((0x17,))
time.sleep(0.1)
t4d.do_read_usb()
bytes = t4d.readbyteq.peek_bytes()
if len(bytes) > 0:
done = True
else:
time.sleep(1)
for byte in bytes:
print "0x%02x" % byte
except tir4.TIR4Exception, errorinfo:
handle_exception(errorinfo)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
| mit |
PaulWay/spacewalk | client/rhel/rhn-client-tools/src/up2date_client/rhnreg_constants.py | 2 | 19114 | # -*- coding: utf-8 -*-
#
# String constants for the RHN Register TUI/GUI.
# Copyright (c) 2000--2014 Red Hat, Inc.
#
# Author:
# James Slagle <jslagle@redhat.com>
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
_ = t.ugettext
COPYRIGHT_TEXT = _(u"Copyright © 2006--2014 Red Hat, Inc. All rights reserved.")
# Satellite URL Window
SATELLITE_URL_WINDOW = _("Enter your Red Hat Satellite URL.")
SATELLITE_URL_TEXT = _("Please enter the location of your Red Hat Satellite "
"server and of its SSL "
"certificate. The SSL certificate is only required "
"if you will be connecting over https (recommended).")
SATELLITE_URL_PROMPT = _("Satellite URL:")
SATELLITE_URL_PROMPT2 = _("SSL certificate:")
SATELLITE_REQUIRED = _("You must enter a valid Satellite URL.")
SSL_REQUIRED = _("If you are using https you must enter the location "
"of a valid SSL certificate.")
# Connect Window
CONNECT_WINDOW = _("Attempting to contact the Spacewalk server.")
CONNECT_WINDOW_TEXT = _("We are attempting to contact the Red Hat "
"Network server at %s.")
CONNECT_WINDOW_TEXT2 = _("A proxy was specified at %s.")
# Start Window
START_REGISTER_WINDOW = _("System Registration")
START_REGISTER_TEXT = _("This assistant will guide you through "
"connecting your system to "
"Red Hat Satellite to receive software "
"updates, including "
"security updates, to keep your system supported "
"and compliant. "
"You will need the following at this time:\n\n"
" * A network connection\n"
" * Your Red Hat Login & password\n"
" * The location of a Red Hat Satellite "
"or Proxy\n\n")
# Why Register Window
WHY_REGISTER = _("Why Should I Connect to RHN? ...")
WHY_REGISTER_WINDOW = _("Why Register")
WHY_REGISTER_TEXT = _("Connecting your system to Red Hat Satellite allows you to take full "
"advantage of the benefits of a paid subscription, including:")
WHY_REGISTER_SEC = _("Security & Updates:")
WHY_REGISTER_DLD = _("Downloads & Upgrades:")
WHY_REGISTER_SUPP = _("Support:")
WHY_REGISTER_COMP = _("Compliance:")
WHY_REGISTER_SEC_TXT = _("Receive the latest software updates, including security updates, keeping this "
"Red Hat Enterprise Linux system updated and secure.")
WHY_REGISTER_DLD_TXT = _("Download installation images for Red Hat Enterprise Linux releases, "
"including new releases.")
WHY_REGISTER_SUPP_TXT = _("Access to the technical support experts at Red Hat or Red Hat's partners for help "
"with any issues you might encounter with this system.")
WHY_REGISTER_COMP_TXT = _("Stay in compliance with your subscription agreement "
"and manage subscriptions "
"for systems connected to your account.")
WHY_REGISTER_TIP = _("Tip: Red Hat values your privacy: "
"http://www.redhat.com/legal/privacy_statement.html")
BACK_REGISTER = _("Take me back to the registration")
# Confirm Quit Window
CONFIRM_QUIT = _("Software Update Not Set Up")
CONFIRM_QUIT_SURE = _("Are you sure you don't want to connect your system to Red Hat Satellite? "
"You'll miss out on the benefits of a Red Hat Enterprise Linux subscription:\n")
CONFIRM_QUIT_WILLNOT = _("You will not be able to take advantage of these subscription privileges without connecting "
"your system to Red Hat Satellite.\n")
CONTINUE_REGISTERING = _("Take me back to the setup process.")
REGISTER_LATER2 = _("I'll register later.")
# Info Window
REGISTER_WINDOW = _("Red Hat Account")
LOGIN_PROMPT = _("Please enter your login information for the %s Red "
"Hat Network Satellite:\n\n")
HOSTED_LOGIN = _("Red Hat Login:")
LOGIN = _("Login:")
PASSWORD = _("Password:")
LOGIN_TIP = _("Tip: Forgot your login or password? Contact your "
"Satellite's Organization Administrator.")
USER_REQUIRED = _("Please enter a desired login.")
PASSWORD_REQUIRED = _("Please enter and verify a password.")
# OS Release Window
SELECT_OSRELEASE = _("Operating System Release Version")
OS_VERSION = _("Operating System version:")
MINOR_RELEASE = _(" Minor Release: ")
LIMITED_UPDATES = _("Limited Updates Only")
ALL_UPDATES = _("All available updates")
CONFIRM_OS_RELEASE_SELECTION = _("Confirm operating system release selection")
CONFIRM_OS_ALL = _("Your system will be subscribed to the base"
" software channel to receive all available"
" updates.")
# Hardware Window
HARDWARE_WINDOW = _("Create Profile - Hardware")
HARDWARE_WINDOW_DESC1 = _("A Profile Name is a descriptive name that"
" you choose to identify this System Profile"
" on the Red Hat Satellite web pages. Optionally,"
" include a computer serial or identification number.")
HARDWARE_WINDOW_DESC2 = _("Additional hardware information including PCI"
" devices, disk sizes and mount points will be"
" included in the profile.")
HARDWARE_WINDOW_CHECKBOX = _("Include the following information about hardware"
" and network:")
# Packages Window
PACKAGES_WINDOW = _("Create Profile - Packages")
PACKAGES_WINDOW_DESC1 = _("RPM information is important to determine what"
" updated software packages are relevant to this"
" system.")
PACKAGES_WINDOW_DESC2 = _("Include RPM packages installed on this system"
" in my System Profile")
PACKAGES_WINDOW_UNCHECK = _("You may deselect individual packages by"
" unchecking them below.")
PACKAGES_WINDOW_PKGLIST = _("Building Package List")
# Product Window
EMAIL = _("*Email Address:")
SYSTEM_ALREADY_SETUP = _("System Already Registered")
SYSTEM_ALREADY_REGISTERED = _("It appears this system has already been set up for software updates:")
SYSTEM_ALREADY_REGISTERED_CONT = _("Are you sure you would like to continue?")
RHSM_SYSTEM_ALREADY_REGISTERED = _("This system has already been registered using Red Hat Subscription Management.\n\n"
"Your system is being registered again using Red Hat Satellite"
" or Red Hat Satellite Proxy technology. Red Hat recommends that customers only register once.\n\n"
"To learn more about RHN Classic/Red Hat Satellite registration and technologies please consult this"
" Knowledge Base Article: https://access.redhat.com/kb/docs/DOC-45563")
# Send Window
SEND_WINDOW = _("Send Profile Information to Red Hat Satellite")
SEND_WINDOW_DESC = _("We are finished collecting information for the System Profile.\n\n"
"Press \"Next\" to send this System Profile to Red Hat Satellite. "
"Click \"Cancel\" and no information will be sent. "
"You can run the registration program later by "
"typing `rhn_register` at the command line.")
# Sending Window
SENDING_WINDOW = _("Sending Profile to Red Hat Satellite")
# Finish Window
FINISH_WINDOW = _("Updates Configured")
FINISH_WINDOW_TEXT_TUI = _("You may now run 'yum update' from this system's "
"command line to get the latest "
"software updates from Red Hat Satellite. You will need to run this "
"periodically to "
"get the latest updates. Alternatively, you may configure this "
"system for automatic software updates (also known as 'auto errata update') "
"via the Red Hat Satellite web interface. (Instructions for this are in chapter 6 "
"of the RHN Reference Guide, available from the 'Help' button in the main Red "
"Hat Network Satellite web interface.)")
# Review Window
REVIEW_WINDOW = _("Review Subscription")
REVIEW_WINDOW_PROMPT = _("Please review the subscription details below:")
SUB_NUM = _("The installation number %s was activated during "
"this system's initial connection to Red Hat Satellite.")
SUB_NUM_RESULT = _("Subscriptions have been activated for the following "
"Red Hat products/services:")
CHANNELS_TITLE = _("Software Channel Subscriptions:")
OK_CHANNELS = _("This system will receive updates from the "
"following software channels:")
CHANNELS_SAT_WARNING = _("Warning: Only installed product listed above will receive "
"updates and support. If you would like "
"to receive updates for additional products, please "
"login to your satellite web interface "
"and subscribe this system to the appropriate "
"software channels. See Kbase article "
"for more details. "
"(http://kbase.redhat.com/faq/docs/DOC-11313)")
YUM_PLUGIN_WARNING = _("Warning: yum-rhn-plugin is not present, could not enable it.\n"
"Automatic updates will not work.")
YUM_PLUGIN_CONF_CHANGED = _("Note: yum-rhn-plugin has been enabled.")
YUM_PLUGIN_CONF_ERROR = _("Warning: An error occurred during enabling yum-rhn-plugin.\n"
"yum-rhn-plugin is not enabled.\n"
"Automatic updates will not work.")
FAILED_CHANNELS = _("You were unable to be subscribed to the following "
"software channels because there were insufficient "
"subscriptions available in your account:")
NO_BASE_CHANNEL = _(
"This system was unable to subscribe to any software channels. Your system "
"will not receive any software updates to keep it secure and supported. "
"Contact your Satellite administrator about this problem. Once you make the "
"appropriate active subscriptions available in your account, you may browse "
"to this system's profile in the RHN web interface and subscribe this system "
"to software channels via the software > software channels tab.")
SLOTS_TITLE = _("Service Level:")
OK_SLOTS = _("Depending on what Red Hat Satellite modules are associated with a system, you'll "
"enjoy different benefits. The following are the "
"Red Hat Satellite modules associated with this system:")
SLOTS = SLOTS_TITLE + "\n" + OK_SLOTS + "\n%s"
FAILED_SLOTS = _("This system was unable to be associated with the "
"following RHN module(s) because there were "
"insufficient subscriptions available in your account:")
UPDATES = _("Update module: per-system updates, email errata "
"notifications, errata information")
MANAGEMENT = _("Management module: automatic updates, systems "
"grouping, systems permissions, system package profiling")
PROVISIONING = _("Provisioning module: bare-metal provisioning, existing state provisioning, "
"rollbacks, configuration management")
MONITORING = _("Monitoring module: pre-defined and custom system "
"performance probes, system performance email "
"notifications, graphs of system performance")
VIRT = _("Virtualization module: software updates for a limited number of "
"virtual guests on this system.")
VIRT_PLATFORM = _("Virtualization Platform module: software updates for an "
"unlimited number virtual guests of this system, access to additional "
"software channels for guests of this system.")
VIRT_FAILED = _("<b>Warning:</b> Any guest systems you create on this system "
"and register to RHN will consume Red Hat Enterprise Linux "
"subscriptions beyond this host system's subscription. You will need "
"to: (1) make a virtualization or virtualization platform system "
"entitlement available and (2) apply that system entitlement to this "
"system in RHN's web interface if you do not want virtual guests of "
"this system to consume additional subscriptions.")
# Shown when the system could not be associated with any service level
# module. Typo fix in the user-facing text: "Contace" -> "Contact".
NO_SYS_ENTITLEMENT = _("This system was unable to be associated with "
    "any RHN service level modules. This system will not receive any software "
    "updates to keep it secure and supported. Contact your Satellite administrator "
    "about this problem. Once you make the "
    "appropriate active subscriptions available in your account, you may browse "
    "to this system's profile in the RHN web interface, delete the profile, and "
    "re-connect this system to Red Hat Satellite.")
ACTIVATION_KEY = _("Universal default activation key detected\n"
"A universal default activation key was detected in your account. "
"This means that a set of properties (software channel subscriptions, "
"package installations, system group memberships, etc.) "
"for your system's connection to Red Hat Satellite or Red Hat Satellite Proxy"
"have been determined by the activation key rather than your "
"installation number. "
"You may also refer to the RHN Reference Guide, section 6.4.6 for more details "
"about activation keys (http://access.redhat.com/knowledge/docs/Red_Hat_Network/)\n"
"Universal Default activation key: %s")
# Error Messages.
FATAL_ERROR = _("Fatal Error")
WARNING = _("Warning")
HOSTED_CONNECTION_ERROR = _("We can't contact the Red Hat Satellite.\n\n"
"Double check the location provided - is '%s' correct?\n"
"If not, you can correct it and try again.\n\n"
"Make sure that the network connection on this system is operational.\n\n"
"This system will not be able to successfully receive software updates "
"from Red Hat without connecting to a Red Hat Satellite server")
BASECHANNELERROR = _("Architecture: %s, OS Release: %s, OS "
"Version: %s")
SERVER_TOO_OLD = _("This server doesn't support functionality "
"needed by this version of the software update"
" setup client. Please try again with a newer "
"server.")
SSL_CERT_ERROR_MSG = _("<b><span size=\"16000\">Incompatible Certificate File</span></b>\n\n"
"The certificate you provided, <b>%s</b>, is not compatible with "
" the Red Hat Satellite server at <b>%s</b>. You may want to double-check"
" that you have provided a valid certificate file."
" Are you sure you have provided the correct certificate, and that"
" the certificate file has not been corrupted?\n\n"
"Please try again with a different certificate file.")
SSL_CERT_EXPIRED = _("<b><span size=\"12000\">Incompatible Certificate File</span></b>\n\n"
" The certificate is expired. Please ensure you have the correct "
" certificate and your system time is correct.")
SSL_CERT_FILE_NOT_FOUND_ERRER = _("Please verify the values of sslCACert and serverURL in "
"/etc/sysconfig/rhn/up2date. You can either make the "
"serverURL use http instead of https, or you can "
"download the SSL cert from your Satellite, place it "
"in /usr/share/rhn, and ensure sslCACert points to it.")
ACT_KEY_USAGE_LIMIT_ERROR = _("Problem registering system.\n\n"
"A universal default activation key limits the "
"number of systems which can connect to "
"the RHN organization associated with your "
"login. To allow this system to connect, "
"please contact your RHN organization "
"administrator to increase the number of "
"systems allowed to connect or to disable "
"this universal default activation key. "
"More details can be found in Red Hat "
"Knowledgebase Article #7924 at "
"http://kbase.redhat.com/faq/FAQ_61_7924.shtm ")
CHANNEL_PAGE_TIP = _("\n Tip: Minor releases with a '*' are currently"
" supported by Red Hat.\n\n")
# Typo fix in the user-facing text: missing space after "Warning:".
CHANNEL_PAGE_WARNING = _("Warning: You will not be able to limit this"
                         " system to minor release that is older than"
                         " the recent minor release if you select this"
                         " option.\n")
CONFIRM_OS_WARNING = _("Your system will be subscribed to %s \n"
"base software channel. You will not be\n"
"able to move this system to an earlier release\n"
"(you will be able to move to a newer release).\n"
"Are you sure you would like to continue?")
# Navigation
OK = _("OK")
ERROR = _("Error")
NEXT = _("Next")
BACK = _("Back")
CANCEL = _("Cancel")
NO_CANCEL = _("No, Cancel")
YES_CONT = _("Yes, Continue")
DESELECT = _("Press <space> to deselect the option.")
| gpl-2.0 |
songjmcn/machine_learning | core/kmeans.py | 1 | 3013 | #coding=utf-8
'''
Created on 2014年6月18日
K-means 算法,将数据聚类到K个中心点
距离计算:欧式距离
中心店计算:均值法
@author: sjm
'''
import numpy as np
import random
def Euclid_dist(x, y):
    """Euclidean distance between *x* and *y*.

    If *y* is 1-D, a single scalar distance is returned.  If *y* is 2-D,
    a vector is returned holding the distance from *x* to each row of *y*
    (broadcasting *x* against the rows).
    """
    squared_diff = (x - y) ** 2
    ndim = len(y.shape)
    if ndim == 1:
        # Double np.sum kept from the original: summing a scalar is a no-op.
        return np.sqrt(np.sum(np.sum(squared_diff)))
    if ndim == 2:
        return np.sqrt(np.sum(squared_diff, axis=1))
    raise ValueError('error x or y shape')
def dist(x, y):
    """Euclidean distance between two 1-D points *x* and *y*.

    Bug fix: the original passed ``axis=1`` to ``np.sqrt``.  ``np.sqrt`` is
    a ufunc and accepts no ``axis`` keyword, so every call raised
    ``TypeError``.  ``np.sum((x - y) ** 2)`` already reduces to a scalar,
    so no axis argument is needed.

    NOTE(review): the original docstring said "马氏距离" (Mahalanobis
    distance), but the code computes a plain Euclidean distance.
    """
    return np.sqrt(np.sum((x - y) ** 2))
def distMat(X, Y):
    """Pairwise distance matrix between the rows of *X* and the rows of *Y*.

    Entry ``(i, j)`` is ``dist(X[i], Y[j])``.

    Bug fix: the original built the rows with ``map(lambda y: dist(x, y), Y)``.
    Under Python 3 ``map`` returns a lazy iterator, so ``np.array`` would
    produce an array of ``map`` objects instead of numbers.  A nested list
    comprehension evaluates eagerly and behaves the same on Python 2 and 3.
    """
    return np.array([[dist(x, y) for y in Y] for x in X])
def sum_dist(data, label, center):
    """Total distance of every sample to its assigned cluster centre.

    ``label[i]`` gives the index (into *center*) of the cluster that the
    i-th row of *data* currently belongs to.
    """
    return sum(dist(sample, center[label[i]])
               for i, sample in enumerate(data))
def kmeans(data, cluster, threshold=1.0e-19, maxIter=100):
    """Lloyd's k-means clustering.

    Parameters
    ----------
    data : array-like, shape (n_samples, n_features)
        Samples to cluster, one per row.
    cluster : int
        Number of clusters (k).
    threshold : float
        Stop when the decrease of the total assignment cost between two
        iterations falls below this value.
    maxIter : int
        Hard cap on the number of assignment/update iterations.

    Returns
    -------
    ndarray, shape (cluster, n_features)
        The final cluster centres.

    Fixes over the original: ``labels`` is an integer array (float labels
    are rejected as indices by modern NumPy), Python-2-only ``xrange`` is
    replaced by ``range``, centres are stored as float so the mean update
    is not truncated for integer input, an empty cluster keeps its old
    centre instead of becoming NaN, and the debug ``print`` was removed.
    """
    data = np.asarray(data)
    n_samples, _n_features = data.shape
    # Initialise the centres with `cluster` distinct random samples.
    perm = np.random.permutation(n_samples)
    center = data[perm[:cluster], :].astype(float)
    labels = np.zeros(n_samples, dtype=int)
    # Cost of the initial assignment (all samples start in cluster 0).
    last_cost = 0.0
    for i in range(n_samples):
        last_cost += Euclid_dist(data[i, :], center[labels[i], :])
    for _iteration in range(maxIter):
        # Assignment step: each sample joins its nearest centre.
        for i in range(n_samples):
            labels[i] = np.argmin(Euclid_dist(data[i, :], center))
        # Update step: each centre moves to the mean of its members.
        for k in range(cluster):
            members = data[(labels == k).nonzero()]
            if len(members):  # guard: an empty cluster keeps its centre
                center[k, :] = np.mean(members, axis=0)
        # Convergence check on the total assignment cost.
        current_cost = 0.0
        for i in range(n_samples):
            current_cost += Euclid_dist(data[i, :], center[labels[i], :])
        if last_cost - current_cost < threshold:
            break
        last_cost = current_cost
    return center
# Dead code: an earlier matrix-based k-means implementation, kept for
# reference only.  It is wrapped in a module-level string literal, so it is
# parsed but never executed.
'''
def kmeans2(data,cluster,threshold=1.0e-19,maxIter=100):
    m=len(data)
    labels=np.zeros(m)
    #cluster=None
    center=np.array(random.sample(data,cluster))
    s=sum_dist(data,labels,center)
    n=0
    while 1:
        n=n+1
        tmp_mat=distMat(data,center)
        labels=tmp_mat.argmin(axis=1)
        for i in xrange(cluster):
            idx=(labels==i).nonzero()
            m=np.mean(data[idx[0]],axis=0)
            center[i]=m
            #d_i=data[idx[0]]
            #d_i=d_i[0]
        s1=sum_dist(data,labels,center)
        if s-s1<threshold:
            break;
        s=s1
        if n>maxIter:
            break;
    return center
'''
if __name__ == '__main__':
    # Smoke-test driver: learn a 10-entry codebook from MNIST training
    # images and save it next to the script.
    from scipy.io import loadmat, savemat
    # NOTE(review): hard-coded absolute Windows path -- point this at your
    # local copy of DeepLearnToolbox's mnist_uint8.mat before running.
    data = loadmat(r'E:\code\matlab\DeepLearnToolbox-master\data\mnist_uint8.mat')
    # Bug fix: `np.float` was removed in NumPy >= 1.24; the builtin `float`
    # is the documented replacement.  Scale uint8 pixels into [0, 1].
    train_x = np.asarray(data['train_x'], float) / 255.0
    codebook = kmeans(train_x, 10)
    savemat('codebook.mat', {'C': codebook})
selective-inference/selective-inference | doc/learning_examples/riboflavin/CV.py | 3 | 10083 | import functools, hashlib
import numpy as np
from scipy.stats import norm as normal_dbn
import regreg.api as rr
from selection.algorithms.debiased_lasso import pseudoinverse_debiasing_matrix
# load in the X matrix
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(hdi); data(riboflavin); X = riboflavin$x')
numpy2ri.activate()
X_full = np.asarray(rpy.r('X'))
numpy2ri.deactivate()
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit, repeat_selection, infer_set_target
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
from selection.learning.learners import mixture_learner
def highdim_model_inference(X,
                            y,
                            truth,
                            selection_algorithm,
                            sampler,
                            lam_min,
                            dispersion,
                            success_params=(1, 1),
                            fit_probability=keras_fit,
                            fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'},
                            alpha=0.1,
                            B=2000,
                            naive=True,
                            learner_klass=mixture_learner,
                            how_many=None):
    """
    Selective inference for the variables picked by ``selection_algorithm``
    in a high-dimensional regression, using a debiased-LASSO estimate of the
    selected coefficients as the observed target.

    Parameters
    ----------
    X, y : design matrix and response.
    truth : true coefficient vector (used to score pivots / coverage).
    selection_algorithm : callable mapping a sampler to a set of variable indices.
    sampler : randomization sampler fed to the selection algorithm.
    lam_min : LASSO Lagrange penalty used for the debiased estimate.
    dispersion : assumed noise variance sigma^2.
    success_params, fit_probability, fit_args, B, learner_klass :
        configuration of the learned selection-probability model.
    alpha : nominal level; intervals target 1 - alpha coverage.
    naive : when True, also compute plain (non-selective) Gaussian intervals.
    how_many : restrict inference to the first ``how_many`` selected variables.

    Returns
    -------
    pandas.DataFrame with one row per selected variable (plus ``naive_*``
    columns when requested), or ``None`` when nothing is selected.

    NOTE(review): ``fit_args`` is a mutable default argument -- do not
    mutate it.  ``pd`` is imported only inside this script's ``__main__``
    block, so this function relies on that import having executed first.
    """
    n, p = X.shape
    XTX = X.T.dot(X)

    # Fingerprint this (X, y, truth) instance so rows from repeated runs
    # can be grouped by 'id' in the accumulated CSV output.
    instance_hash = hashlib.md5()
    instance_hash.update(X.tobytes())
    instance_hash.update(y.tobytes())
    instance_hash.update(truth.tobytes())
    instance_id = instance_hash.hexdigest()

    # run selection algorithm
    observed_set = repeat_selection(selection_algorithm, sampler, *success_params)
    observed_list = sorted(observed_set)

    # observed debiased LASSO estimate: solve the LASSO at lam_min ...
    loss = rr.squared_error(X, y)
    pen = rr.l1norm(p, lagrange=lam_min)
    problem = rr.simple_problem(loss, pen)
    soln = problem.solve()
    grad = X.T.dot(X.dot(soln) - y) # gradient at beta_hat

    # ... then apply a one-step debiasing correction on the selected coords.
    M = pseudoinverse_debiasing_matrix(X,
                                       observed_list)

    observed_target = soln[observed_list] - M.dot(grad)
    tmp = X.dot(M.T)
    # Covariance of the debiased target, and its cross-covariance with the
    # full score vector, under the assumed dispersion.
    target_cov = tmp.T.dot(tmp) * dispersion
    cross_cov = np.identity(p)[:,observed_list] * dispersion

    if len(observed_list) > 0:

        if how_many is None:
            how_many = len(observed_list)
        observed_list = observed_list[:how_many]

        # find the target, based on the observed outcome
        (pivots,
         covered,
         lengths,
         pvalues,
         lower,
         upper) = [], [], [], [], [], []

        targets = []
        true_target = truth[observed_list]

        # Selective inference for the whole selected set at once.
        results = infer_set_target(selection_algorithm,
                                   observed_set,
                                   observed_list,
                                   sampler,
                                   observed_target,
                                   target_cov,
                                   cross_cov,
                                   hypothesis=true_target,
                                   fit_probability=fit_probability,
                                   fit_args=fit_args,
                                   success_params=success_params,
                                   alpha=alpha,
                                   B=B,
                                   learner_klass=learner_klass)

        for i, result in enumerate(results):

            (pivot,
             interval,
             pvalue,
             _) = result

            pvalues.append(pvalue)
            pivots.append(pivot)
            # coverage indicator: did the interval catch the true value?
            covered.append((interval[0] < true_target[i]) * (interval[1] > true_target[i]))
            lengths.append(interval[1] - interval[0])
            lower.append(interval[0])
            upper.append(interval[1])

        if len(pvalues) > 0:
            df = pd.DataFrame({'pivot':pivots,
                               'pvalue':pvalues,
                               'coverage':covered,
                               'length':lengths,
                               'upper':upper,
                               'lower':lower,
                               'id':[instance_id]*len(pvalues),
                               'target':true_target,
                               'variable':observed_list,
                               'B':[B]*len(pvalues)})
            if naive:

                # Matching naive (non-selective) Gaussian intervals for the
                # same debiased targets, for calibration comparison.
                (naive_pvalues,
                 naive_pivots,
                 naive_covered,
                 naive_lengths,
                 naive_upper,
                 naive_lower) = [], [], [], [], [], []

                for j, idx in enumerate(observed_list):
                    true_target = truth[idx]
                    target_sd = np.sqrt(target_cov[j, j])
                    observed_target_j = observed_target[j]
                    quantile = normal_dbn.ppf(1 - 0.5 * alpha)
                    naive_interval = (observed_target_j - quantile * target_sd,
                                      observed_target_j + quantile * target_sd)

                    naive_upper.append(naive_interval[1])
                    naive_lower.append(naive_interval[0])
                    # two-sided pivot at the true value ...
                    naive_pivot = (1 - normal_dbn.cdf((observed_target_j - true_target) / target_sd))
                    naive_pivot = 2 * min(naive_pivot, 1 - naive_pivot)
                    naive_pivots.append(naive_pivot)

                    # ... and two-sided p-value at zero.
                    naive_pvalue = (1 - normal_dbn.cdf(observed_target_j / target_sd))
                    naive_pvalue = 2 * min(naive_pvalue, 1 - naive_pvalue)
                    naive_pvalues.append(naive_pvalue)

                    naive_covered.append((naive_interval[0] < true_target) * (naive_interval[1] > true_target))
                    naive_lengths.append(naive_interval[1] - naive_interval[0])

                naive_df = pd.DataFrame({'naive_pivot':naive_pivots,
                                         'naive_pvalue':naive_pvalues,
                                         'naive_coverage':naive_covered,
                                         'naive_length':naive_lengths,
                                         'naive_upper':naive_upper,
                                         'naive_lower':naive_lower,
                                         'variable':observed_list,
                                         })
                # Join selective and naive results on the variable index.
                df = pd.merge(df, naive_df, on='variable')
            return df
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):
    """
    One simulation round on the riboflavin design: build a sparse truth,
    draw a response, run glmnet-based selection, and return the merged
    selective / Liu inference results as a DataFrame (or None when the
    selection is empty).

    :param s: number of nonzero coefficients.
    :param signal: (low, high) range of the nonzero signal magnitudes.
    :param sigma: noise standard deviation.
    :param alpha: nominal level for the Liu intervals.
    :param B: number of learning replications.
    :param seed: seed forwarded to the R glmnet calls.
    """
    # description of statistical problem

    n, p = X_full.shape

    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
    else:
        X = X_full.copy()

    # Standardise columns to zero mean / unit variance.
    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]

    # Sparse truth: s signals spread over a random subset of coordinates,
    # scaled to a sigma/sqrt(n) effect size.
    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth *= sigma / np.sqrt(n)

    y = X.dot(truth) + sigma * np.random.standard_normal(n)

    # Cross-validated glmnet penalties, rescaled to this script's
    # (unnormalised) Lagrange convention.
    lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
    lam_min, lam_1se = n * lam_min, n * lam_1se

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    # Data-splitting sampler over the score decomposition X * y.
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(X, XTXi, resid, sampler):
        # Reconstruct a response from the sampled score, then run the
        # glmnet LASSO selector on it.
        S = sampler.center.copy()
        ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
        G = lasso_glmnet(X, ynew, *[None]*4)
        select = G.select(seed=seed)
        return set(list(select[0]))

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm
    df = highdim_model_inference(X,
                                 y,
                                 truth,
                                 selection_algorithm,
                                 splitting_sampler,
                                 lam_min,
                                 sigma**2, # dispersion assumed known for now
                                 success_params=(1, 1),
                                 B=B,
                                 fit_probability=keras_fit,
                                 fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})

    if df is not None:
        # Comparison method: Liu et al. inference at (almost) the same lam.
        liu_df = liu_inference(X,
                               y,
                               1.00001 * lam_min,
                               dispersion,
                               truth,
                               alpha=alpha,
                               approximate_inverse='BN')

        return pd.merge(df, liu_df, on='variable')
if __name__ == "__main__":

    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    # NOTE: pandas is imported here at module scope, and the functions
    # above rely on this `pd` binding being present before they run.
    import pandas as pd

    # Grid on [0, 1] for plotting empirical CDFs of the pivots.
    U = np.linspace(0, 1, 101)
    plt.clf()

    # Randomised base seed; each round i uses init_seed + i.
    # NOTE(review): init_seed is a float (np.fabs of a normal draw) -- the
    # downstream R calls appear to accept that, but confirm.
    init_seed = np.fabs(np.random.standard_normal() * 500)
    for i in range(500):
        df = simulate(seed=init_seed+i)
        csvfile = 'riboflavin_CV.csv'
        outbase = csvfile[:-4]

        if df is not None and i > 0:

            # Append earlier rounds' results if the CSV already exists.
            try:
                df = pd.concat([df, pd.read_csv(csvfile)])
            except FileNotFoundError:
                pass
            df.to_csv(csvfile, index=False)

            if len(df['pivot']) > 0:
                # Pivot ECDF plot, with the Liu CV pivots overlaid.
                pivot_ax, lengths_ax = pivot_plot(df, outbase)
                liu_pivot = df['liu_pivot']
                liu_pivot = liu_pivot[~np.isnan(liu_pivot)]
                pivot_ax.plot(U, sm.distributions.ECDF(liu_pivot)(U), 'gray', label='Liu CV',
                              linewidth=3)
                pivot_ax.legend()
                fig = pivot_ax.figure
                fig.savefig(csvfile[:-4] + '.pdf')
| bsd-3-clause |
paulboal/pexpect-curses | swearjar/test/curses01.py | 1 | 1647 | #!/usr/bin/python
import curses
import math
import os
import sys
import fcntl
import struct
import termios
import array
import logging
import time
# Debug log for the curses test harness (curses owns the terminal, so
# print() is not usable while the UI is active).
logging.basicConfig(filename='example.log',level=logging.DEBUG)

__doc__="""\
The program presents a series of menus that we're going to use for testing.
1. Enter new person
2. Update person
3. Search for person
4. Delete person
The tests will navigate through the menu options and perform various actions.
Below are the test numbers and their corresponding activities. This series of
tests simulates several data entry scenarios.
1. After the main menu loads, type "1" to select the option to enter a new person.
2. On the Enter New Person screen, type in First Name, Last Name, Phone numbers
with tabs in between to shift fields.
"""

# UI text constants: title bar, status/help line, and first menu entry.
WELCOME="Chronicles Main Menu"
HELP="<Help> - Instructions"
MENU1="1. Edit Data"
def center(s, n):
    """Pad *s* with spaces to total width *n*, roughly centring it.

    The left pad is floor((n - 1 - len(s)) / 2), so the text sits slightly
    left of centre; the right pad absorbs the remainder.
    """
    left_pad = int(math.floor((n - 1 - len(s)) / 2))
    right_pad = n - len(s) - left_pad
    return " " * left_pad + s + " " * right_pad
# --- curses UI setup -------------------------------------------------------
# Take over the terminal, hide the cursor, and disable echo of typed keys.
s = curses.initscr()
curses.curs_set(0)
curses.noecho()
#curses.raw()
#curses.cbreak()
# Current terminal size (rows, cols); used for centring and the status line.
MAXY,MAXX = s.getmaxyx()
try:
    # Title bar on the top row, status/help line on the bottom row.
    s.addnstr(0, 0, center(WELCOME,MAXX), MAXX, curses.A_REVERSE)
    s.addnstr(MAXY-1, 0, center(HELP + " " + str(sys.stdout.isatty()) + " " + str(MAXY) + "," + str(MAXX),MAXX), MAXX, curses.A_REVERSE)
except:
    # NOTE(review): bare except silently swallows addnstr errors (e.g. when
    # writing the bottom-right cell); left as-is for this test harness.
    pass
s.refresh()
time.sleep(0.2)
# First menu entry, drawn a few rows below the title.
s.addnstr(5, 4, MENU1, MAXX)
# --- main input loop: echo each pressed key in the status line, 'q' quits --
while True:
    s.refresh()
    k = s.getch()
    if chr(k) == 'q': break
    try:
        s.addnstr(MAXY-1,0, center(HELP + " YOU PRESSED A KEY: " + chr(k) + " <" + str(MAXY) + "," + str(MAXX) + ">" ,MAXX), MAXX, curses.A_REVERSE)
    except:
        pass
# Restore the terminal to its normal state.
curses.endwin()
| mit |
stainsteelcrown/nonsense-story-generator | venv/lib/python2.7/site-packages/werkzeug/contrib/fixers.py | 464 | 9949 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as good as possible but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
    """Middleware for FastCGI/CGI deployments where the app root is set to
    the CGI script's own path rather than the path users visit.

    .. versionchanged:: 0.9
        Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.

    :param app: the WSGI application
    :param app_root: Defaulting to ``'/'``, set this if the app is mounted
        somewhere other than the URL root.
    """

    def __init__(self, app, app_root='/'):
        self.app = app
        self.app_root = app_root

    def __call__(self, environ, start_response):
        # Only rewrite PATH_INFO for old Lighttpd versions, or when no
        # server software is reported at all (e.g. in tests that don't set
        # the SERVER_SOFTWARE key).
        if 'SERVER_SOFTWARE' not in environ or \
           environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
            script_name = environ.get('SCRIPT_NAME', '')
            path_info = environ.get('PATH_INFO', '')
            environ['PATH_INFO'] = script_name + path_info
        # The script name always becomes the configured app root.
        environ['SCRIPT_NAME'] = self.app_root.strip('/')
        return self.app(environ, start_response)
# Backwards compatibility: this fixer was called ``LighttpdCGIRootFix``
# before the 0.9 rename to ``CGIRootFix``; keep the old name importable.
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
    """Rebuild `PATH_INFO` from the raw request URL.

    On Windows, environment variables are limited to the system charset, so
    `PATH_INFO` can lose information (e.g. for CGI scripts on a Windows
    Apache).  This fixer recreates it from `REQUEST_URI`, `REQUEST_URL`, or
    `UNENCODED_URL`, whichever the web server provides.

    :param app: the WSGI application
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        script_name = unquote(environ.get('SCRIPT_NAME', ''))
        # Try the candidate variables in a fixed preference order; the
        # first one present that starts with the script name wins.
        for candidate in ('REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL'):
            if candidate not in environ:
                continue
            request_uri = unquote(environ[candidate])
            if request_uri.startswith(script_name):
                # Strip the script name and any query string.
                path = request_uri[len(script_name):].split('?', 1)[0]
                environ['PATH_INFO'] = path
                break
        return self.app(environ, start_response)
class ProxyFix(object):
    """Add HTTP-proxy support to an application that was not designed with
    proxies in mind, by rewriting `REMOTE_ADDR`, `HTTP_HOST` and the URL
    scheme from the `X-Forwarded-*` headers.

    Set `num_proxies` to the number of proxy servers in front of the app.
    Do not use this middleware in non-proxy setups for security reasons.

    The original values are preserved in the WSGI environment as
    `werkzeug.proxy_fix.orig_remote_addr`,
    `werkzeug.proxy_fix.orig_http_host` and
    `werkzeug.proxy_fix.orig_wsgi_url_scheme`.

    :param app: the WSGI application
    :param num_proxies: the number of proxy servers in front of the app.
    """

    def __init__(self, app, num_proxies=1):
        self.app = app
        self.num_proxies = num_proxies

    def get_remote_addr(self, forwarded_for):
        """Pick the client address from the X-Forwarded-For list: the entry
        added by the `num_proxies`-th proxy from the end.  Returns None when
        the list is too short.  (Before 0.9 the first entry was used.)

        .. versionadded:: 0.8
        """
        if len(forwarded_for) >= self.num_proxies:
            return forwarded_for[-self.num_proxies]

    def __call__(self, environ, start_response):
        proto = environ.get('HTTP_X_FORWARDED_PROTO', '')
        raw_forwarded = environ.get('HTTP_X_FORWARDED_FOR', '')
        host = environ.get('HTTP_X_FORWARDED_HOST', '')
        # Stash the originals before overwriting anything.
        environ['werkzeug.proxy_fix.orig_wsgi_url_scheme'] = \
            environ.get('wsgi.url_scheme')
        environ['werkzeug.proxy_fix.orig_remote_addr'] = \
            environ.get('REMOTE_ADDR')
        environ['werkzeug.proxy_fix.orig_http_host'] = \
            environ.get('HTTP_HOST')
        # Split, trim and drop empty entries from the forwarded chain.
        addresses = [addr.strip() for addr in raw_forwarded.split(',')]
        addresses = [addr for addr in addresses if addr]
        remote_addr = self.get_remote_addr(addresses)
        if remote_addr is not None:
            environ['REMOTE_ADDR'] = remote_addr
        if host:
            environ['HTTP_HOST'] = host
        if proto:
            environ['wsgi.url_scheme'] = proto
        return self.app(environ, start_response)
class HeaderRewriterFix(object):
    """Middleware that removes some response headers and appends others.
    Useful e.g. to strip a `Date` header added by the server, or to attach
    `X-Powered-By`-style headers::

        app = HeaderRewriterFix(app, remove_headers=['Date'],
                                add_headers=[('X-Powered-By', 'WSGI')])

    :param app: the WSGI application
    :param remove_headers: a sequence of header keys that should be
                           removed.
    :param add_headers: a sequence of ``(key, value)`` tuples that should
                        be added.
    """

    def __init__(self, app, remove_headers=None, add_headers=None):
        self.app = app
        # Removal is case-insensitive, so normalise the keys once here.
        self.remove_headers = set(h.lower() for h in (remove_headers or ()))
        self.add_headers = list(add_headers or ())

    def __call__(self, environ, start_response):
        def rewriting_start_response(status, headers, exc_info=None):
            # Keep everything not on the removal list, then append the
            # configured extra headers.
            kept = [(key, value) for key, value in headers
                    if key.lower() not in self.remove_headers]
            kept.extend(self.add_headers)
            return start_response(status, kept, exc_info)
        return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
    """This middleware fixes a couple of bugs with Microsoft Internet
    Explorer.  Currently the following fixes are applied:

    -   removing of `Vary` headers for unsupported mimetypes which
        causes troubles with caching.  Can be disabled by passing
        ``fix_vary=False`` to the constructor.
        see: http://support.microsoft.com/kb/824847/en-us

    -   removes offending headers to work around caching bugs in
        Internet Explorer if `Content-Disposition` is set.  Can be
        disabled by passing ``fix_attach=False`` to the constructor.

    If it does not detect affected Internet Explorer versions it won't touch
    the request / response.
    """

    # This code was inspired by Django fixers for the same bugs.  The
    # fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and is available as part of the Django project:
    #     http://code.djangoproject.com/ticket/4148

    def __init__(self, app, fix_vary=True, fix_attach=True):
        self.app = app
        self.fix_vary = fix_vary
        self.fix_attach = fix_attach

    def fix_headers(self, environ, headers, status=None):
        # Mutates `headers` (a werkzeug Headers object) in place.
        # `status` is accepted but currently unused.
        if self.fix_vary:
            header = headers.get('content-type', '')
            mimetype, options = parse_options_header(header)
            # IE mishandles Vary for anything that is not plain text/html.
            if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
                headers.pop('vary', None)

        if self.fix_attach and 'content-disposition' in headers:
            # Downloads break in IE when no-cache/no-store is set, so strip
            # those directives from Pragma and Cache-Control.
            pragma = parse_set_header(headers.get('pragma', ''))
            pragma.discard('no-cache')
            header = pragma.to_header()
            if not header:
                headers.pop('pragma', '')
            else:
                headers['Pragma'] = header
            header = headers.get('cache-control', '')
            if header:
                cc = parse_cache_control_header(header,
                                                cls=ResponseCacheControl)
                cc.no_cache = None
                cc.no_store = False
                header = cc.to_header()
                if not header:
                    headers.pop('cache-control', '')
                else:
                    headers['Cache-Control'] = header

    def run_fixed(self, environ, start_response):
        # Wrap start_response so the headers can be fixed before they are
        # handed to the server.
        def fixing_start_response(status, headers, exc_info=None):
            headers = Headers(headers)
            self.fix_headers(environ, headers, status)
            return start_response(status, headers.to_wsgi_list(), exc_info)
        return self.app(environ, fixing_start_response)

    def __call__(self, environ, start_response):
        # Only engage the fixes for MSIE user agents; everything else is
        # passed straight through untouched.
        ua = UserAgent(environ)
        if ua.browser != 'msie':
            return self.app(environ, start_response)
        return self.run_fixed(environ, start_response)
| mit |
EmreAtes/spack | var/spack/repos/builtin/packages/py-maestrowf/package.py | 5 | 1771 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMaestrowf(PythonPackage):
    """A general purpose workflow conductor for running multi-step
    simulation studies."""

    # Project links used by Spack's info/fetch machinery.
    homepage = "https://github.com/LLNL/maestrowf/"
    url      = "https://github.com/LLNL/maestrowf/archive/v1.0.1.tar.gz"

    # Known release and the MD5 checksum of its tarball.
    version('1.0.1', '6838fc8bdc7ca0c1adbb6a0333f005b4')

    # Build-time and run-time Python dependencies.
    depends_on('py-setuptools', type='build')
    depends_on('py-pyyaml', type=('build', 'run'))
    depends_on('py-six', type=('build', 'run'))
    depends_on('py-enum34', type=('build', 'run'))
| lgpl-2.1 |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/prompt_toolkit/layout/lexers.py | 15 | 11334 | """
Lexer interface and implementation.
Used for syntax highlighting.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from six.moves import range
from prompt_toolkit.token import Token
from prompt_toolkit.filters import to_cli_filter
from .utils import split_lines
import re
import six
# Public API of this module.
__all__ = (
    'Lexer',
    'SimpleLexer',
    'PygmentsLexer',
    'SyntaxSync',
    'SyncFromStart',
    'RegexSync',
)
class Lexer(with_metaclass(ABCMeta, object)):
    """
    Base class for all lexers.

    Subclasses implement :meth:`lex_document`, which produces a per-line
    tokenizer for a given document.
    """
    @abstractmethod
    def lex_document(self, cli, document):
        """
        Takes a :class:`~prompt_toolkit.document.Document` and returns a
        callable that takes a line number and returns the tokens for that line.
        """
class SimpleLexer(Lexer):
    """
    Trivial lexer: no tokenizing at all, every line becomes one single
    token.

    :param token: The `Token` attached to all the input.
    """
    # `default_token` parameter is deprecated!
    def __init__(self, token=Token, default_token=None):
        # The deprecated `default_token`, when given, wins over `token`.
        self.token = default_token if default_token is not None else token

    def lex_document(self, cli, document):
        lines = document.lines

        def get_line(lineno):
            "The whole line under a single token; [] past the last line."
            try:
                return [(self.token, lines[lineno])]
            except IndexError:
                return []

        return get_line
class SyntaxSync(with_metaclass(ABCMeta, object)):
    """
    Syntax synchroniser. This is a tool that finds a start position for the
    lexer. This is especially important when editing big documents; we don't
    want to start the highlighting by running the lexer from the beginning of
    the file. That is very slow when editing.
    """
    @abstractmethod
    def get_sync_start_position(self, document, lineno):
        """
        Return the position from where we can start lexing as a (row, column)
        tuple.

        :param document: `Document` instance that contains all the lines.
        :param lineno: The line that we want to highlight. (We need to return
            this line, or an earlier position.)
        """
class SyncFromStart(SyntaxSync):
    """
    Trivial synchroniser: always restart the lexer from the very beginning
    of the document, regardless of the requested line.
    """
    def get_sync_start_position(self, document, lineno):
        # Row 0, column 0 -- the top of the document.
        return (0, 0)
class RegexSync(SyntaxSync):
    """
    Synchronise the lexer by scanning backwards from the requested line for
    one that matches a given regex pattern, and starting there.
    """
    # Never go more than this amount of lines backwards for synchronisation.
    # That would be too CPU intensive.
    MAX_BACKWARDS = 500

    # Start lexing at the start, if we are in the first 'n' lines and no
    # synchronisation position was found.
    FROM_START_IF_NO_SYNC_POS_FOUND = 100

    def __init__(self, pattern):
        assert isinstance(pattern, six.text_type)
        self._compiled_pattern = re.compile(pattern)

    def get_sync_start_position(self, document, lineno):
        "Scan backwards for a matching line; return its (row, column)."
        lines = document.lines
        pattern = self._compiled_pattern

        # Walk upwards from `lineno`, bounded by MAX_BACKWARDS.
        lowest = max(-1, lineno - self.MAX_BACKWARDS)
        for row in range(lineno, lowest, -1):
            m = pattern.match(lines[row])
            if m is not None:
                return row, m.start()

        # Nothing matched.  Near the top of the document we can afford to
        # start at the very beginning; otherwise start at the current line.
        if lineno < self.FROM_START_IF_NO_SYNC_POS_FOUND:
            return 0, 0
        return lineno, 0

    @classmethod
    def from_pygments_lexer_cls(cls, lexer_cls):
        """
        Create a :class:`.RegexSync` instance for this Pygments lexer class.
        """
        patterns = {
            # For Python, start highlighting at any class/def block.
            'Python': r'^\s*(class|def)\s+',
            'Python 3': r'^\s*(class|def)\s+',

            # For HTML, start at any open/close tag definition.
            'HTML': r'<[/a-zA-Z]',

            # For javascript, start at a function.
            'JavaScript': r'\bfunction\b'

            # Unknown languages fall back to '^': every line is a valid
            # synchronisation point.
        }
        return cls(patterns.get(lexer_cls.name, '^'))
class PygmentsLexer(Lexer):
    """
    Lexer that calls a pygments lexer.

    Example::

        from pygments.lexers import HtmlLexer
        lexer = PygmentsLexer(HtmlLexer)

    Note: Don't forget to also load a Pygments compatible style. E.g.::

        from prompt_toolkit.styles.from_pygments import style_from_pygments
        from pygments.styles import get_style_by_name
        style = style_from_pygments(get_style_by_name('monokai'))

    :param pygments_lexer_cls: A `Lexer` from Pygments.
    :param sync_from_start: Start lexing at the start of the document. This
        will always give the best results, but it will be slow for bigger
        documents. (When the last part of the document is display, then the
        whole document will be lexed by Pygments on every key stroke.) It is
        recommended to disable this for inputs that are expected to be more
        than 1,000 lines.
    :param syntax_sync: `SyntaxSync` object.
    """
    # Minimum amount of lines to go backwards when starting the parser.
    # This is important when the lines are retrieved in reverse order, or when
    # scrolling upwards. (Due to the complexity of calculating the vertical
    # scroll offset in the `Window` class, lines are not always retrieved in
    # order.)
    MIN_LINES_BACKWARDS = 50

    # When a parser was started this amount of lines back, read the parser
    # until we get the current line. Otherwise, start a new parser.
    # (This should probably be bigger than MIN_LINES_BACKWARDS.)
    REUSE_GENERATOR_MAX_DISTANCE = 100

    def __init__(self, pygments_lexer_cls, sync_from_start=True, syntax_sync=None):
        assert syntax_sync is None or isinstance(syntax_sync, SyntaxSync)

        self.pygments_lexer_cls = pygments_lexer_cls
        self.sync_from_start = to_cli_filter(sync_from_start)

        # Instantiate the Pygments lexer.  Stripping is disabled so the
        # lexer returns exactly as much text as it was given.
        self.pygments_lexer = pygments_lexer_cls(
            stripnl=False,
            stripall=False,
            ensurenl=False)

        # Create syntax sync instance.
        self.syntax_sync = syntax_sync or RegexSync.from_pygments_lexer_cls(pygments_lexer_cls)

    @classmethod
    def from_filename(cls, filename, sync_from_start=True):
        """
        Create a `Lexer` from a filename.  Falls back to a `SimpleLexer`
        when Pygments has no lexer registered for this filename.
        """
        # Inline imports: the Pygments dependency is optional!
        from pygments.util import ClassNotFound
        from pygments.lexers import get_lexer_for_filename

        try:
            pygments_lexer = get_lexer_for_filename(filename)
        except ClassNotFound:
            return SimpleLexer()
        else:
            return cls(pygments_lexer.__class__, sync_from_start=sync_from_start)

    def lex_document(self, cli, document):
        """
        Create a lexer function that takes a line number and returns the list
        of (Token, text) tuples as the Pygments lexer returns for that line.
        """
        # Cache of already lexed lines.
        cache = {}

        # Pygments generators that are currently lexing.
        line_generators = {}  # Map lexer generator to the line number.

        def get_syntax_sync():
            " The Syntax synchronisation objcet that we currently use. "
            if self.sync_from_start(cli):
                return SyncFromStart()
            else:
                return self.syntax_sync

        def find_closest_generator(i):
            " Return a generator close to line 'i', or None if none was fonud. "
            for generator, lineno in line_generators.items():
                if lineno < i and i - lineno < self.REUSE_GENERATOR_MAX_DISTANCE:
                    return generator

        def create_line_generator(start_lineno, column=0):
            """
            Create a generator that yields the lexed lines.
            Each iteration it yields a (line_number, [(token, text), ...]) tuple.
            """
            def get_tokens():
                text = '\n'.join(document.lines[start_lineno:])[column:]

                # We call `get_tokens_unprocessed`, because `get_tokens` will
                # still replace \r\n and \r by \n.  (We don't want that,
                # Pygments should return exactly the same amount of text, as we
                # have given as input.)
                for _, t, v in self.pygments_lexer.get_tokens_unprocessed(text):
                    yield t, v

            return enumerate(split_lines(get_tokens()), start_lineno)

        def get_generator(i):
            """
            Find an already started generator that is close, or create a new one.
            """
            # Find closest line generator.
            generator = find_closest_generator(i)
            if generator:
                return generator

            # No generator found. Determine starting point for the syntax
            # synchronisation first.

            # Go at least x lines back. (Make scrolling upwards more
            # efficient.)
            i = max(0, i - self.MIN_LINES_BACKWARDS)

            if i == 0:
                row = 0
                column = 0
            else:
                row, column = get_syntax_sync().get_sync_start_position(document, i)

            # Find generator close to this point, or otherwise create a new one.
            generator = find_closest_generator(i)
            if generator:
                return generator
            else:
                generator = create_line_generator(row, column)

                # If the column is not 0, ignore the first line. (Which is
                # incomplete. This happens when the synchronisation algorithm tells
                # us to start parsing in the middle of a line.)
                if column:
                    next(generator)
                    row += 1

                line_generators[generator] = row
                return generator

        def get_line(i):
            " Return the tokens for a given line number. "
            try:
                return cache[i]
            except KeyError:
                generator = get_generator(i)

                # Exhaust the generator, until we find the requested line.
                for num, line in generator:
                    cache[num] = line
                    if num == i:
                        line_generators[generator] = i

                        # Remove the next item from the cache.
                        # (It could happen that it's already there, because of
                        # another generator that started filling these lines,
                        # but we want to synchronise these lines with the
                        # current lexer's state.)
                        if num + 1 in cache:
                            del cache[num + 1]

                        return cache[num]
            return []

        return get_line
| mit |
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/third_party/pep8/lib2to3/lib2to3/fixes/fix_xrange.py | 326 | 2699 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes xrange(...) into range(...)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name, Call, consuming_calls
from .. import patcomp
class FixXrange(fixer_base.BaseFix):
    """2to3 fixer: rewrite ``xrange(...)`` to ``range(...)`` and wrap plain
    ``range(...)`` calls in ``list(...)`` where the result is consumed as a
    list (Python 3's range is lazy)."""

    BM_compatible = True
    # Matches both `range(args)...` and `xrange(args)...` power nodes.
    PATTERN = """
              power<
                 (name='range'|name='xrange') trailer< '(' args=any ')' >
              rest=any* >
              """

    def start_tree(self, tree, filename):
        super(FixXrange, self).start_tree(tree, filename)
        # Node ids of range() calls that came from a rewritten xrange():
        # those must NOT be wrapped in list() later.
        self.transformed_xranges = set()

    def finish_tree(self, tree, filename):
        # Drop per-tree state so node ids from one file can't leak into the
        # next file's transformation.
        self.transformed_xranges = None

    def transform(self, node, results):
        # Dispatch on which alternative of PATTERN matched.
        name = results["name"]
        if name.value == u"xrange":
            return self.transform_xrange(node, results)
        elif name.value == u"range":
            return self.transform_range(node, results)
        else:
            raise ValueError(repr(name))

    def transform_xrange(self, node, results):
        name = results["name"]
        name.replace(Name(u"range", prefix=name.prefix))
        # This prevents the new range call from being wrapped in a list later.
        self.transformed_xranges.add(id(node))

    def transform_range(self, node, results):
        # Wrap in list() unless this range came from xrange, or it is used
        # in a context that consumes iterators (see in_special_context).
        if (id(node) not in self.transformed_xranges and
            not self.in_special_context(node)):
            range_call = Call(Name(u"range"), [results["args"].clone()])
            # Encase the range call in list().
            list_call = Call(Name(u"list"), [range_call],
                             prefix=node.prefix)
            # Put things that were after the range() call after the list call.
            for n in results["rest"]:
                list_call.append_child(n)
            return list_call

    # Pattern: range() passed directly to a single-argument call.
    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
    p1 = patcomp.compile_pattern(P1)

    # Patterns: range() used in `for`-loops, comprehensions or `in` tests,
    # all of which accept an iterator as-is.
    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
            | comp_for< 'for' any 'in' node=any any* >
            | comparison< any 'in' node=any any*>
         """
    p2 = patcomp.compile_pattern(P2)

    def in_special_context(self, node):
        # True when the surrounding context consumes an iterator, so the
        # lazy range() can be left unwrapped.
        if node.parent is None:
            return False
        results = {}
        if (node.parent.parent is not None and
            self.p1.match(node.parent.parent, results) and
            results["node"] is node):
            # list(d.keys()) -> list(d.keys()), etc.
            return results["func"].value in consuming_calls
        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
        return self.p2.match(node.parent, results) and results["node"] is node
| epl-1.0 |
wso2/product-private-paas | components/org.wso2.ppaas.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/thrift/protocol/TCompactProtocol.py | 19 | 10945 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from struct import pack, unpack
from TProtocol import *
__all__ = ['TCompactProtocol', 'TCompactProtocolFactory']

# States of the protocol's internal state machine.  They record what the
# caller is allowed to do next, so mismatched begin/end calls are caught
# early by assertions instead of producing corrupt wire data.
CLEAR = 0
FIELD_WRITE = 1
VALUE_WRITE = 2
CONTAINER_WRITE = 3
BOOL_WRITE = 4
FIELD_READ = 5
CONTAINER_READ = 6
VALUE_READ = 7
BOOL_READ = 8
def make_helper(v_from, container):
    """Build a decorator that guards a protocol method with a state check.

    The returned decorator wraps a method so that, before delegating,
    the wrapper asserts ``self.state`` is either *v_from* or *container*.
    """
    allowed = (v_from, container)

    def helper(func):
        def nested(self, *args, **kwargs):
            # Fail loudly (reporting the offending state) if the protocol
            # state machine is not in one of the two expected states.
            assert self.state in allowed, (self.state, v_from, container)
            return func(self, *args, **kwargs)
        return nested

    return helper
# Decorators asserting that a value (or container element) is being
# written/read while the state machine is in a legal state.
writer = make_helper(VALUE_WRITE, CONTAINER_WRITE)
reader = make_helper(VALUE_READ, CONTAINER_READ)
def makeZigZag(n, bits):
    """Map a signed *bits*-wide integer onto its unsigned zigzag code.

    Small-magnitude values (positive or negative) get small codes, which
    keeps the subsequent varint encoding short.
    """
    sign = n >> (bits - 1)  # 0 for non-negative n, -1 (all ones) otherwise
    return (n << 1) ^ sign
def fromZigZag(n):
    """Invert the zigzag mapping: decode *n* back into a signed integer."""
    magnitude = n >> 1
    sign = -(n & 1)  # all-ones mask when the low (sign) bit is set
    return magnitude ^ sign
def writeVarint(trans, n):
    """Write the non-negative integer *n* to *trans* as a ULEB128 varint.

    Each output byte carries 7 payload bits; the high bit (0x80) marks
    that another byte follows.
    """
    chunks = []
    while n & ~0x7f:
        # Higher-order bits remain: emit the low 7 bits with the
        # continuation flag set.
        chunks.append(chr((n & 0x7f) | 0x80))
        n >>= 7
    chunks.append(chr(n))
    trans.write(''.join(chunks))
def readVarint(trans):
    """Read a ULEB128 varint from *trans* and return it as an int."""
    value = 0
    shift = 0
    while True:
        byte = ord(trans.readAll(1))
        value |= (byte & 0x7f) << shift
        if not (byte & 0x80):
            # Continuation bit clear: this was the final chunk.
            return value
        shift += 7
class CompactType:
    """Wire-level type codes used by the compact protocol."""
    STOP = 0x00
    TRUE = 0x01    # boolean True folds its value into the field-type nibble
    FALSE = 0x02
    BYTE = 0x03
    I16 = 0x04
    I32 = 0x05
    I64 = 0x06
    DOUBLE = 0x07
    BINARY = 0x08  # also used for strings
    LIST = 0x09
    SET = 0x0A
    MAP = 0x0B
    STRUCT = 0x0C
# Mapping from the standard Thrift TType codes to their compact
# wire-type equivalents.
CTYPES = {TType.STOP: CompactType.STOP,
          TType.BOOL: CompactType.TRUE,  # used for collection
          TType.BYTE: CompactType.BYTE,
          TType.I16: CompactType.I16,
          TType.I32: CompactType.I32,
          TType.I64: CompactType.I64,
          TType.DOUBLE: CompactType.DOUBLE,
          TType.STRING: CompactType.BINARY,
          TType.STRUCT: CompactType.STRUCT,
          TType.LIST: CompactType.LIST,
          TType.SET: CompactType.SET,
          TType.MAP: CompactType.MAP
          }

# Inverse mapping (compact code -> TType); both TRUE and FALSE decode
# to TType.BOOL.
TTYPES = {}
for k, v in CTYPES.items():
    TTYPES[v] = k
TTYPES[CompactType.FALSE] = TType.BOOL
del k
del v
class TCompactProtocol(TProtocolBase):
    """Compact implementation of the Thrift protocol driver.

    Integers are zigzag + varint encoded, field headers are delta-encoded
    against the previous field id, and boolean field values are folded
    into the field-type nibble.  A small state machine (``self.state``)
    asserts that read/write calls arrive in a legal order.
    """
    PROTOCOL_ID = 0x82
    VERSION = 1
    VERSION_MASK = 0x1f
    TYPE_MASK = 0xe0
    TYPE_SHIFT_AMOUNT = 5

    def __init__(self, trans):
        TProtocolBase.__init__(self, trans)
        self.state = CLEAR
        self.__last_fid = 0       # previous field id, for delta encoding
        self.__bool_fid = None    # field id of a deferred boolean field
        self.__bool_value = None  # bool decoded from the field-type nibble
        self.__structs = []       # stack of (state, last_fid), one per open struct
        self.__containers = []    # stack of saved states, one per open container

    def __writeVarint(self, n):
        writeVarint(self.trans, n)

    def writeMessageBegin(self, name, type, seqid):
        assert self.state == CLEAR
        self.__writeUByte(self.PROTOCOL_ID)
        # Version and message type share a single byte.
        self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT))
        self.__writeVarint(seqid)
        self.__writeString(name)
        self.state = VALUE_WRITE

    def writeMessageEnd(self):
        assert self.state == VALUE_WRITE
        self.state = CLEAR

    def writeStructBegin(self, name):
        assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state
        # Save the enclosing context; field-id deltas restart per struct.
        self.__structs.append((self.state, self.__last_fid))
        self.state = FIELD_WRITE
        self.__last_fid = 0

    def writeStructEnd(self):
        assert self.state == FIELD_WRITE
        self.state, self.__last_fid = self.__structs.pop()

    def writeFieldStop(self):
        self.__writeByte(0)

    def __writeFieldHeader(self, type, fid):
        delta = fid - self.__last_fid
        if 0 < delta <= 15:
            # Short form: delta packed into the high nibble.
            self.__writeUByte(delta << 4 | type)
        else:
            # Long form: type byte followed by the zigzag-encoded field id.
            self.__writeByte(type)
            self.__writeI16(fid)
        self.__last_fid = fid

    def writeFieldBegin(self, name, type, fid):
        assert self.state == FIELD_WRITE, self.state
        if type == TType.BOOL:
            # Defer the header: the boolean value itself selects the
            # compact type (TRUE/FALSE) when writeBool() is called.
            self.state = BOOL_WRITE
            self.__bool_fid = fid
        else:
            self.state = VALUE_WRITE
            self.__writeFieldHeader(CTYPES[type], fid)

    def writeFieldEnd(self):
        assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state
        self.state = FIELD_WRITE

    def __writeUByte(self, byte):
        self.trans.write(pack('!B', byte))

    def __writeByte(self, byte):
        self.trans.write(pack('!b', byte))

    def __writeI16(self, i16):
        self.__writeVarint(makeZigZag(i16, 16))

    def __writeSize(self, i32):
        self.__writeVarint(i32)

    def writeCollectionBegin(self, etype, size):
        assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
        if size <= 14:
            # Short form: size packed into the high nibble.
            self.__writeUByte(size << 4 | CTYPES[etype])
        else:
            # Long form: 0xf marker nibble, then a varint size.
            self.__writeUByte(0xf0 | CTYPES[etype])
            self.__writeSize(size)
        self.__containers.append(self.state)
        self.state = CONTAINER_WRITE
    writeSetBegin = writeCollectionBegin
    writeListBegin = writeCollectionBegin

    def writeMapBegin(self, ktype, vtype, size):
        assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
        if size == 0:
            # An empty map is a single zero byte; no key/value type byte.
            self.__writeByte(0)
        else:
            self.__writeSize(size)
            # Key type in the high nibble, value type in the low nibble.
            self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype])
        self.__containers.append(self.state)
        self.state = CONTAINER_WRITE

    def writeCollectionEnd(self):
        assert self.state == CONTAINER_WRITE, self.state
        self.state = self.__containers.pop()
    writeMapEnd = writeCollectionEnd
    writeSetEnd = writeCollectionEnd
    writeListEnd = writeCollectionEnd

    def writeBool(self, bool):
        if self.state == BOOL_WRITE:
            # Emit the deferred field header with the value encoded in
            # the type nibble.
            if bool:
                ctype = CompactType.TRUE
            else:
                ctype = CompactType.FALSE
            self.__writeFieldHeader(ctype, self.__bool_fid)
        elif self.state == CONTAINER_WRITE:
            # Inside a container a boolean occupies a full byte.
            if bool:
                self.__writeByte(CompactType.TRUE)
            else:
                self.__writeByte(CompactType.FALSE)
        else:
            raise AssertionError("Invalid state in compact protocol")

    writeByte = writer(__writeByte)
    writeI16 = writer(__writeI16)

    @writer
    def writeI32(self, i32):
        self.__writeVarint(makeZigZag(i32, 32))

    @writer
    def writeI64(self, i64):
        self.__writeVarint(makeZigZag(i64, 64))

    @writer
    def writeDouble(self, dub):
        self.trans.write(pack('!d', dub))

    def __writeString(self, s):
        # Length-prefixed: varint size, then the raw bytes.
        self.__writeSize(len(s))
        self.trans.write(s)
    writeString = writer(__writeString)

    def readFieldBegin(self):
        assert self.state == FIELD_READ, self.state
        type = self.__readUByte()
        if type & 0x0f == TType.STOP:
            return (None, 0, 0)
        delta = type >> 4
        if delta == 0:
            # Long form: an explicit zigzag-encoded field id follows.
            fid = self.__readI16()
        else:
            fid = self.__last_fid + delta
        self.__last_fid = fid
        type = type & 0x0f
        if type == CompactType.TRUE:
            # Boolean value is carried by the type nibble itself.
            self.state = BOOL_READ
            self.__bool_value = True
        elif type == CompactType.FALSE:
            self.state = BOOL_READ
            self.__bool_value = False
        else:
            self.state = VALUE_READ
        return (None, self.__getTType(type), fid)

    def readFieldEnd(self):
        assert self.state in (VALUE_READ, BOOL_READ), self.state
        self.state = FIELD_READ

    def __readUByte(self):
        result, = unpack('!B', self.trans.readAll(1))
        return result

    def __readByte(self):
        result, = unpack('!b', self.trans.readAll(1))
        return result

    def __readVarint(self):
        return readVarint(self.trans)

    def __readZigZag(self):
        return fromZigZag(self.__readVarint())

    def __readSize(self):
        result = self.__readVarint()
        if result < 0:
            raise TException("Length < 0")
        return result

    def readMessageBegin(self):
        assert self.state == CLEAR
        proto_id = self.__readUByte()
        if proto_id != self.PROTOCOL_ID:
            raise TProtocolException(TProtocolException.BAD_VERSION,
                                     'Bad protocol id in the message: %d' % proto_id)
        ver_type = self.__readUByte()
        # Version and message type share one byte (see writeMessageBegin).
        type = (ver_type & self.TYPE_MASK) >> self.TYPE_SHIFT_AMOUNT
        version = ver_type & self.VERSION_MASK
        if version != self.VERSION:
            raise TProtocolException(TProtocolException.BAD_VERSION,
                                     'Bad version: %d (expect %d)' % (version, self.VERSION))
        seqid = self.__readVarint()
        name = self.__readString()
        return (name, type, seqid)

    def readMessageEnd(self):
        assert self.state == CLEAR
        assert len(self.__structs) == 0

    def readStructBegin(self):
        assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state
        self.__structs.append((self.state, self.__last_fid))
        self.state = FIELD_READ
        self.__last_fid = 0

    def readStructEnd(self):
        assert self.state == FIELD_READ
        self.state, self.__last_fid = self.__structs.pop()

    def readCollectionBegin(self):
        assert self.state in (VALUE_READ, CONTAINER_READ), self.state
        size_type = self.__readUByte()
        size = size_type >> 4
        type = self.__getTType(size_type)
        if size == 15:
            # Long form: the actual size follows as a varint.
            size = self.__readSize()
        self.__containers.append(self.state)
        self.state = CONTAINER_READ
        return type, size
    readSetBegin = readCollectionBegin
    readListBegin = readCollectionBegin

    def readMapBegin(self):
        assert self.state in (VALUE_READ, CONTAINER_READ), self.state
        size = self.__readSize()
        types = 0
        if size > 0:
            # Key type in the high nibble, value type in the low nibble.
            types = self.__readUByte()
        vtype = self.__getTType(types)
        ktype = self.__getTType(types >> 4)
        self.__containers.append(self.state)
        self.state = CONTAINER_READ
        return (ktype, vtype, size)

    def readCollectionEnd(self):
        assert self.state == CONTAINER_READ, self.state
        self.state = self.__containers.pop()
    readSetEnd = readCollectionEnd
    readListEnd = readCollectionEnd
    readMapEnd = readCollectionEnd

    def readBool(self):
        if self.state == BOOL_READ:
            # Value was already decoded from the field-header nibble.
            # (__bool_value is a Python bool; True == CompactType.TRUE == 1.)
            return self.__bool_value == CompactType.TRUE
        elif self.state == CONTAINER_READ:
            return self.__readByte() == CompactType.TRUE
        else:
            raise AssertionError("Invalid state in compact protocol: %d" %
                                 self.state)

    readByte = reader(__readByte)
    __readI16 = __readZigZag
    readI16 = reader(__readZigZag)
    readI32 = reader(__readZigZag)
    readI64 = reader(__readZigZag)

    @reader
    def readDouble(self):
        buff = self.trans.readAll(8)
        val, = unpack('!d', buff)
        return val

    def __readString(self):
        len = self.__readSize()
        return self.trans.readAll(len)
    readString = reader(__readString)

    def __getTType(self, byte):
        # Only the low nibble carries the compact type code.
        return TTYPES[byte & 0x0f]
class TCompactProtocolFactory:
    """Factory producing TCompactProtocol instances for a transport."""

    def __init__(self):
        # Stateless factory; nothing to configure.
        pass

    def getProtocol(self, trans):
        # Bind a new compact-protocol codec to the given transport.
        return TCompactProtocol(trans)
| apache-2.0 |
gauravkumar1987/googletest | xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by squre
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| bsd-3-clause |
FICTURE7/youtube-dl | youtube_dl/extractor/golem.py | 186 | 2181 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
determine_ext,
)
class GolemIE(InfoExtractor):
    """Extractor for videos hosted on video.golem.de.

    Title, duration, formats and thumbnails are read from the site's
    per-video XML configuration document.
    """
    _VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
    _TEST = {
        'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
        'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
        'info_dict': {
            'id': '14095',
            'format_id': 'high',
            'ext': 'mp4',
            'title': 'iPhone 6 und 6 Plus - Test',
            'duration': 300.44,
            'filesize': 65309548,
        }
    }

    _PREFIX = 'http://video.golem.de'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Per-video XML config: children are format entries plus metadata
        # elements such as <title>, <playtime> and <teaser> nodes.
        config = self._download_xml(
            'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)

        info = {
            'id': video_id,
            'title': config.findtext('./title', 'golem'),
            'duration': self._float(config.findtext('./playtime'), 'duration'),
        }

        formats = []
        for e in config:
            # NOTE(review): this rebinding shadows the `url` parameter;
            # harmless here because the parameter is no longer needed.
            url = e.findtext('./url')
            if not url:
                continue

            formats.append({
                'format_id': e.tag,
                'url': compat_urlparse.urljoin(self._PREFIX, url),
                'height': self._int(e.get('height'), 'height'),
                'width': self._int(e.get('width'), 'width'),
                'filesize': self._int(e.findtext('filesize'), 'filesize'),
                'ext': determine_ext(e.findtext('./filename')),
            })
        self._sort_formats(formats)
        info['formats'] = formats

        thumbnails = []
        for e in config.findall('.//teaser'):
            url = e.findtext('./url')
            if not url:
                continue
            thumbnails.append({
                'url': compat_urlparse.urljoin(self._PREFIX, url),
                'width': self._int(e.get('width'), 'thumbnail width'),
                'height': self._int(e.get('height'), 'thumbnail height'),
            })
        info['thumbnails'] = thumbnails

        return info
| unlicense |
opennode/waldur-mastermind | src/waldur_mastermind/marketplace_openstack/tests/test_metadata.py | 1 | 5277 | from waldur_mastermind.marketplace.tests import factories as marketplace_factories
from waldur_mastermind.marketplace_openstack.tests.utils import BaseOpenStackTest
from waldur_openstack.openstack_tenant.tests import (
fixtures as openstack_tenant_fixtures,
)
class VolumeMetadataTest(BaseOpenStackTest):
    """Changes to an OpenStack volume must be mirrored into the name and
    backend_metadata of the marketplace resource scoped to it."""

    def setUp(self):
        super(VolumeMetadataTest, self).setUp()
        self.fixture = openstack_tenant_fixtures.OpenStackTenantFixture()
        self.volume = self.fixture.volume
        # Marketplace resource whose scope points at the volume under test.
        self.resource = marketplace_factories.ResourceFactory(scope=self.volume)

    def test_action_is_synchronized(self):
        self.volume.action = 'detach'
        self.volume.action_details = {'message': 'Detaching volume from instance.'}
        self.volume.save()

        self.resource.refresh_from_db()
        self.assertEqual(self.resource.backend_metadata['action'], self.volume.action)
        self.assertEqual(
            self.resource.backend_metadata['action_details'], self.volume.action_details
        )

    def test_size_is_synchronized(self):
        self.volume.size = 100
        self.volume.save()

        self.resource.refresh_from_db()
        self.assertEqual(self.resource.backend_metadata['size'], self.volume.size)

    def test_name_is_synchronized(self):
        self.volume.name = 'new volume name'
        self.volume.save()

        self.resource.refresh_from_db()
        self.assertEqual(self.resource.name, self.volume.name)

    def test_state_is_synchronized(self):
        self.volume.set_erred()
        self.volume.save()

        self.resource.refresh_from_db()
        # The human-readable state label is stored, not the raw value.
        self.assertEqual(
            self.resource.backend_metadata['state'], self.volume.get_state_display()
        )
        self.assertEqual(
            self.resource.backend_metadata['runtime_state'], self.volume.runtime_state
        )

    def test_instance_is_synchronized(self):
        instance = self.fixture.instance
        self.volume.instance = instance
        self.volume.save()

        self.resource.refresh_from_db()
        self.assertEqual(
            self.resource.backend_metadata['instance_uuid'], instance.uuid.hex
        )
        self.assertEqual(self.resource.backend_metadata['instance_name'], instance.name)

    def test_instance_name_is_updated(self):
        instance = self.fixture.instance
        self.volume.instance = instance
        self.volume.save()

        # Renaming the attached instance afterwards must also refresh
        # the volume's resource metadata.
        instance.name = 'Name has been changed'
        instance.save()

        self.resource.refresh_from_db()
        self.assertEqual(self.resource.backend_metadata['instance_name'], instance.name)

    def test_instance_has_been_detached(self):
        # Arrange
        instance = self.fixture.instance
        self.volume.instance = instance
        self.volume.save()

        # Act
        self.volume.instance = None
        self.volume.save()

        # Assert
        self.resource.refresh_from_db()
        self.assertIsNone(self.resource.backend_metadata['instance_name'])
        self.assertIsNone(self.resource.backend_metadata['instance_uuid'])
class NetworkMetadataTest(BaseOpenStackTest):
    """Internal/floating IP changes of an instance must be mirrored into
    the backend_metadata of the marketplace resource scoped to it."""

    def setUp(self):
        super(NetworkMetadataTest, self).setUp()
        self.fixture = openstack_tenant_fixtures.OpenStackTenantFixture()
        self.instance = self.fixture.instance
        self.resource = marketplace_factories.ResourceFactory(scope=self.instance)

    def test_internal_ip_address_is_synchronized(self):
        # Creating the internal IP (via the fixture) triggers the sync.
        internal_ip = self.fixture.internal_ip

        self.resource.refresh_from_db()
        self.assertEqual(
            self.resource.backend_metadata['internal_ips'], internal_ip.fixed_ips
        )

    def test_internal_ip_address_is_updated(self):
        internal_ip = self.fixture.internal_ip
        internal_ip.fixed_ips = [
            {'ip_address': '10.0.0.1', 'subnet_id': internal_ip.subnet.backend_id}
        ]
        internal_ip.save()

        self.resource.refresh_from_db()
        # Only the bare addresses are exposed in the metadata.
        self.assertEqual(
            self.resource.backend_metadata['internal_ips'], ['10.0.0.1'],
        )

    def test_internal_ip_address_is_updated_on_delete(self):
        internal_ip = self.fixture.internal_ip
        internal_ip.fixed_ips = [
            {'ip_address': '10.0.0.1', 'subnet_id': internal_ip.subnet.backend_id}
        ]
        internal_ip.save()
        self.resource.refresh_from_db()

        internal_ip.delete()

        self.resource.refresh_from_db()
        self.assertEqual(self.resource.backend_metadata['internal_ips'], [])

    def test_floating_ip_address_is_synchronized(self):
        internal_ip = self.fixture.internal_ip
        floating_ip = self.fixture.floating_ip

        # Attaching a floating IP to the instance's port exposes it as
        # an external IP of the resource.
        floating_ip.internal_ip = internal_ip
        floating_ip.save()

        self.resource.refresh_from_db()
        self.assertEqual(
            self.resource.backend_metadata['external_ips'], [floating_ip.address]
        )

    def test_floating_ip_address_is_synchronized_on_delete(self):
        internal_ip = self.fixture.internal_ip
        floating_ip = self.fixture.floating_ip
        floating_ip.internal_ip = internal_ip
        floating_ip.save()

        floating_ip.delete()

        self.resource.refresh_from_db()
        self.assertEqual(self.resource.backend_metadata['external_ips'], [])
| mit |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/s3/bucketlogging.py | 17 | 3175 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax.saxutils
from acl import Grant
class BucketLogging(object):
    """Server-access-logging configuration of an S3 bucket.

    Doubles as a SAX-style content handler for parsing the
    ``BucketLoggingStatus`` XML document and as a serializer that renders
    the configuration back to XML.  A ``target`` of None means logging
    is disabled for the bucket.
    """

    def __init__(self, target=None, prefix=None, grants=None):
        self.target = target
        self.prefix = prefix
        self.grants = [] if grants is None else grants

    def __repr__(self):
        # No target bucket -> logging is switched off entirely.
        if self.target is None:
            return "<BucketLoggingStatus: Disabled>"
        descriptions = []
        for grant in self.grants:
            if grant.type == 'CanonicalUser':
                grantee = grant.display_name
            elif grant.type == 'Group':
                grantee = grant.uri
            else:
                grantee = grant.email_address
            descriptions.append("%s = %s" % (grantee, grant.permission))
        return "<BucketLoggingStatus: %s/%s (%s)>" % (
            self.target, self.prefix, ", ".join(descriptions))

    def add_grant(self, grant):
        """Attach an extra ACL grant for the delivered log objects."""
        self.grants.append(grant)

    def startElement(self, name, attrs, connection):
        # Only <Grant> opens a nested handler; everything else is handled
        # by endElement when the text value is known.
        if name != 'Grant':
            return None
        grant = Grant()
        self.grants.append(grant)
        return grant

    def endElement(self, name, value, connection):
        if name == 'TargetBucket':
            self.target = value
        elif name == 'TargetPrefix':
            self.prefix = value
        else:
            # Unknown elements are stored verbatim as attributes.
            setattr(self, name, value)

    def to_xml(self):
        """Render the configuration as a BucketLoggingStatus document.

        The caller is responsible for encoding the result to utf-8.
        """
        pieces = [
            u'<?xml version="1.0" encoding="UTF-8"?>',
            u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">',
        ]
        if self.target is not None:
            pieces.append(u'<LoggingEnabled>')
            pieces.append(u'<TargetBucket>%s</TargetBucket>' % self.target)
            pieces.append(u'<TargetPrefix>%s</TargetPrefix>' %
                          xml.sax.saxutils.escape(self.prefix or ''))
            if self.grants:
                pieces.append('<TargetGrants>')
                for grant in self.grants:
                    pieces.append(grant.to_xml())
                pieces.append('</TargetGrants>')
            pieces.append(u'</LoggingEnabled>')
        pieces.append(u'</BucketLoggingStatus>')
        return u''.join(pieces)
| gpl-3.0 |
mohamed-ali/PyTables | tables/tests/test_indexes.py | 5 | 99639 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import copy
import tempfile
import numpy
import tables
from tables import (
StringCol, BoolCol, IntCol, FloatCol, Int8Col, Int32Col, Int64Col,
UInt16Col, Float32Col,
)
from tables.index import Index, default_auto_index, default_index_filters
from tables.idxutils import calc_chunksize
from tables.exceptions import OldIndexWarning
from tables.tests import common
from tables.tests.common import verbose, allequal, heavy, TempFileMixin
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
import six
from six.moves import range
# Sensible parameters for indexing with small blocksizes
minRowIndex = 10  # default number of rows written into the test tables
# (superblock, block, slice, chunk) sizes passed as _blocksizes to create_index
small_blocksizes = (96, 24, 6, 3)
class TDescr(tables.IsDescription):
    """Row description for the index tests: one column per dtype family."""
    var1 = StringCol(itemsize=4, dflt=b"", pos=1)  # string column
    var2 = BoolCol(dflt=0, pos=2)                  # bool column
    var3 = IntCol(dflt=0, pos=3)                   # int column
    var4 = FloatCol(dflt=0, pos=4)                 # float column
class BasicTestCase(common.TempFileMixin, TestCase):
compress = 0
complib = "zlib"
shuffle = 0
fletcher32 = 0
nrows = minRowIndex
ss = small_blocksizes[2]
def setUp(self):
    """Create and populate the test file, index it, then close it so
    each test reopens it in the mode it needs."""
    super(BasicTestCase, self).setUp()
    self.rootgroup = self.h5file.root
    self.populateFile()
    # Close the file
    self.h5file.close()
def populateFile(self):
    """Fill the table with self.nrows rows and index every column."""
    group = self.rootgroup
    # Create a table
    title = "This is the IndexArray title"
    self.filters = tables.Filters(complevel=self.compress,
                                  complib=self.complib,
                                  shuffle=self.shuffle,
                                  fletcher32=self.fletcher32)
    table = self.h5file.create_table(group, 'table', TDescr, title,
                                     self.filters, self.nrows)
    for i in range(self.nrows):
        table.row['var1'] = str(i).encode('ascii')
        # table.row['var2'] = i > 2
        table.row['var2'] = i % 2
        table.row['var3'] = i
        # var4 runs in descending order so selections exercise reverse order.
        table.row['var4'] = float(self.nrows - i - 1)
        table.row.append()
    table.flush()
    # Index all entries:
    for col in six.itervalues(table.colinstances):
        indexrows = col.create_index(_blocksizes=small_blocksizes)
    if verbose:
        print("Number of written rows:", self.nrows)
        print("Number of indexed rows:", indexrows)

    return
def test00_flushLastRow(self):
    """Checking flushing an Index incrementing only the last row."""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test00_flushLastRow..." %
              self.__class__.__name__)

    # Open the HDF5 file in append mode
    self.h5file = tables.open_file(self.h5fname, mode="a")
    table = self.h5file.root.table
    # Add just 3 rows more
    for i in range(3):
        table.row['var1'] = str(i).encode('ascii')
        table.row.append()
    table.flush()  # redo the indexes
    idxcol = table.cols.var1.index
    if verbose:
        print("Max rows in buf:", table.nrowsinbuf)
        print("Number of elements per slice:", idxcol.slicesize)
        print("Chunk size:", idxcol.sorted.chunksize)
        print("Elements in last row:", idxcol.indicesLR[-1])

    # Do a selection; '1' now appears twice (original row plus appended).
    results = [p["var1"] for p in table.where('var1 == b"1"')]
    self.assertEqual(len(results), 2)
    self.assertEqual(results, [b'1']*2)
def test00_update(self):
    """Checking automatic re-indexing after an update operation."""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test00_update..." % self.__class__.__name__)

    # Open the HDF5 file in append mode
    self.h5file = tables.open_file(self.h5fname, mode="a")
    table = self.h5file.root.table
    # Modify a couple of columns
    for i, row in enumerate(table.where("(var3>1) & (var3<5)")):
        row['var1'] = str(i)
        row['var3'] = i
        row.update()
    table.flush()  # redo the indexes
    idxcol1 = table.cols.var1.index
    idxcol3 = table.cols.var3.index
    if verbose:
        print("Dirtyness of var1 col:", idxcol1.dirty)
        print("Dirtyness of var3 col:", idxcol3.dirty)
    # flush() must have refreshed the indexes of the updated columns.
    self.assertEqual(idxcol1.dirty, False)
    self.assertEqual(idxcol3.dirty, False)

    # Do a couple of selections
    results = [p["var1"] for p in table.where('var1 == b"1"')]
    self.assertEqual(len(results), 2)
    self.assertEqual(results, [b'1']*2)
    results = [p["var3"] for p in table.where('var3 == 0')]
    self.assertEqual(len(results), 2)
    self.assertEqual(results, [0]*2)
def test01_readIndex(self):
    """Checking reading an Index (string flavor)"""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test01_readIndex..." % self.__class__.__name__)

    # Open the HDF5 file in read-only mode
    self.h5file = tables.open_file(self.h5fname, mode="r")
    table = self.h5file.root.table
    idxcol = table.cols.var1.index
    if verbose:
        print("Max rows in buf:", table.nrowsinbuf)
        print("Number of elements per slice:", idxcol.slicesize)
        print("Chunk size:", idxcol.sorted.chunksize)

    # Do a selection; exactly one row was written with var1 == b"1".
    results = [p["var1"] for p in table.where('var1 == b"1"')]
    self.assertEqual(len(results), 1)
    self.assertEqual(results, [b'1'])
def test02_readIndex(self):
    """Checking reading an Index (bool flavor)"""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test02_readIndex..." % self.__class__.__name__)

    # Open the HDF5 file in read-only mode
    self.h5file = tables.open_file(self.h5fname, mode="r")
    table = self.h5file.root.table
    idxcol = table.cols.var2.index
    if verbose:
        print("Rows in table:", table.nrows)
        print("Max rows in buf:", table.nrowsinbuf)
        print("Number of elements per slice:", idxcol.slicesize)
        print("Chunk size:", idxcol.sorted.chunksize)

    # Do a selection; var2 was written as i % 2, so half the rows match.
    results = [p["var2"] for p in table.where('var2 == True')]
    if verbose:
        print("Selected values:", results)
    self.assertEqual(len(results), self.nrows // 2)
    self.assertEqual(results, [True]*(self.nrows // 2))
def test03_readIndex(self):
    """Checking reading an Index (int flavor)"""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test03_readIndex..." % self.__class__.__name__)

    # Open the HDF5 file in read-only mode
    self.h5file = tables.open_file(self.h5fname, mode="r")
    table = self.h5file.root.table
    idxcol = table.cols.var3.index
    if verbose:
        print("Max rows in buf:", table.nrowsinbuf)
        print("Number of elements per slice:", idxcol.slicesize)
        print("Chunk size:", idxcol.sorted.chunksize)

    # Do a selection over an open interval; var3 holds the row number.
    results = [p["var3"] for p in table.where('(1<var3)&(var3<10)')]
    if verbose:
        print("Selected values:", results)
    self.assertEqual(len(results), min(10, table.nrows) - 2)
    self.assertEqual(results, list(range(2, min(10, table.nrows))))
def test04_readIndex(self):
    """Checking reading an Index (float flavor)"""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test04_readIndex..." % self.__class__.__name__)

    # Open the HDF5 file in read-only mode
    self.h5file = tables.open_file(self.h5fname, mode="r")
    table = self.h5file.root.table
    idxcol = table.cols.var4.index
    if verbose:
        print("Max rows in buf:", table.nrowsinbuf)
        print("Number of rows in table:", table.nrows)
        print("Number of elements per slice:", idxcol.slicesize)
        print("Chunk size:", idxcol.sorted.chunksize)

    # Do a selection
    results = [p["var4"] for p in table.where('var4 < 10')]
    # results = [p["var4"] for p in table.where('(1<var4)&(var4<10)')]
    if verbose:
        print("Selected values:", results)
    self.assertEqual(len(results), min(10, table.nrows))
    # var4 was written in descending order, so matches come back reversed.
    self.assertEqual(results, [float(i) for i in
                               reversed(list(range(min(10, table.nrows))))])
def test05_getWhereList(self):
    """Checking reading an Index with get_where_list (string flavor)"""

    if verbose:
        print('\n', '-=' * 30)
        print("Running %s.test05_getWhereList..." %
              self.__class__.__name__)

    # Open the HDF5 file in read-write mode
    self.h5file = tables.open_file(self.h5fname, mode="a")
    table = self.h5file.root.table
    idxcol = table.cols.var4.index
    if verbose:
        print("Max rows in buf:", table.nrowsinbuf)
        print("Number of elements per slice:", idxcol.slicesize)
        print("Chunk size:", idxcol.sorted.chunksize)

    # Do a selection
    table.flavor = "python"
    rowList1 = table.get_where_list('var1 < b"10"')
    # Cross-check the indexed result against a full table iteration.
    rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
    if verbose:
        print("Selected values:", rowList1)
        print("Should look like:", rowList2)
    self.assertEqual(len(rowList1), len(rowList2))
    self.assertEqual(rowList1, rowList2)
def test06_getWhereList(self):
"""Checking reading an Index with get_where_list (bool flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test06_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var2.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Rows in tables:", table.nrows)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "numpy"
rowList1 = table.get_where_list('var2 == False', sort=True)
rowList2 = [p.nrow for p in table if p['var2'] is False]
# Convert to a NumPy object
rowList2 = numpy.array(rowList2, numpy.int64)
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertTrue(allequal(rowList1, rowList2))
def test07_getWhereList(self):
"""Checking reading an Index with get_where_list (int flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test07_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var4.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "python"
rowList1 = table.get_where_list('var3 < 15', sort=True)
rowList2 = [p.nrow for p in table if p["var3"] < 15]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
def test08_getWhereList(self):
"""Checking reading an Index with get_where_list (float flavor)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test08_getWhereList..." %
self.__class__.__name__)
# Open the HDF5 file in read-write mode
self.h5file = tables.open_file(self.h5fname, mode="a")
table = self.h5file.root.table
idxcol = table.cols.var4.index
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Number of elements per slice:", idxcol.slicesize)
print("Chunk size:", idxcol.sorted.chunksize)
# Do a selection
table.flavor = "python"
rowList1 = table.get_where_list('var4 < 10', sort=True)
rowList2 = [p.nrow for p in table if p['var4'] < 10]
if verbose:
print("Selected values:", rowList1)
print("Should look like:", rowList2)
self.assertEqual(len(rowList1), len(rowList2))
self.assertEqual(rowList1, rowList2)
    def test09a_removeIndex(self):
        """Checking removing an index."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test09a_removeIndex..." %
                  self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        self.h5file = tables.open_file(self.h5fname, mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before deletion")
            print("var1 column:", table.cols.var1)
        # Precondition: var1 starts out indexed.
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        # delete the index
        table.cols.var1.remove_index()
        if verbose:
            print("After deletion")
            print("var1 column:", table.cols.var1)
        # Removal must clear both the Column.index object and the table's
        # colindexed bookkeeping.
        self.assertTrue(table.cols.var1.index is None)
        self.assertEqual(table.colindexed["var1"], 0)
        # re-create the index again
        indexrows = table.cols.var1.create_index(_blocksizes=small_blocksizes)
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        if verbose:
            print("After re-creation")
            print("var1 column:", table.cols.var1)
        # A fresh index can be built after removal without side effects.
        self.assertTrue(idxcol is not None)
        self.assertEqual(table.colindexed["var1"], 1)
    def test09b_removeIndex(self):
        """Checking removing an index (persistent version)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test09b_removeIndex..." %
                  self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        self.h5file = tables.open_file(self.h5fname, mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before deletion")
            print("var1 index column:", table.cols.var1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(table.colindexed["var1"], 1)
        # delete the index
        table.cols.var1.remove_index()
        # close and reopen the file
        # Reopening verifies that the removal was actually persisted to
        # disk, not just reflected in the in-memory objects.
        self._reopen(mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("After deletion")
            print("var1 column:", table.cols.var1)
        self.assertTrue(table.cols.var1.index is None)
        self.assertEqual(table.colindexed["var1"], 0)
        # re-create the index again
        indexrows = table.cols.var1.create_index(_blocksizes=small_blocksizes)
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        if verbose:
            print("After re-creation")
            print("var1 column:", table.cols.var1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(table.colindexed["var1"], 1)
    def test10a_moveIndex(self):
        """Checking moving a table with an index."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test10a_moveIndex..." % self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        self.h5file = tables.open_file(self.h5fname, mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before move")
            print("var1 column:", idxcol)
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        # Create a new group called "agroup"
        agroup = self.h5file.create_group("/", "agroup")
        # move the table to "agroup"
        table.move(agroup, "table2")
        if verbose:
            print("After move")
            print("var1 column:", idxcol)
        # The index must stay attached to the column after the move.
        self.assertTrue(table.cols.var1.index is not None)
        self.assertEqual(table.colindexed["var1"], 1)
        # Some sanity checks
        # The moved index must still answer queries correctly; compare an
        # indexed query against a brute-force scan.
        table.flavor = "python"
        rowList1 = table.get_where_list('var1 < b"10"')
        rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
        if verbose:
            print("Selected values:", rowList1)
            print("Should look like:", rowList2)
        self.assertEqual(len(rowList1), len(rowList2))
        self.assertEqual(rowList1, rowList2)
    def test10b_moveIndex(self):
        """Checking moving a table with an index (persistent version)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test10b_moveIndex..." % self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        self.h5file = tables.open_file(self.h5fname, mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before move")
            print("var1 index column:", idxcol)
        self.assertTrue(idxcol is not None)
        self.assertEqual(table.colindexed["var1"], 1)
        # Create a new group called "agroup"
        agroup = self.h5file.create_group("/", "agroup")
        # move the table to "agroup"
        table.move(agroup, "table2")
        # close and reopen the file
        # Reopening verifies the move (and the attached index) was
        # persisted to disk, not just tracked in memory.
        self._reopen(mode="a")
        table = self.h5file.root.agroup.table2
        idxcol = table.cols.var1.index
        if verbose:
            print("After move")
            print("var1 column:", idxcol)
        self.assertTrue(table.cols.var1.index is not None)
        self.assertEqual(table.colindexed["var1"], 1)
        # Some sanity checks
        # Indexed query must still agree with a brute-force scan.
        table.flavor = "python"
        rowList1 = table.get_where_list('var1 < b"10"')
        rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
        if verbose:
            print("Selected values:", rowList1, type(rowList1))
            print("Should look like:", rowList2, type(rowList2))
        self.assertEqual(len(rowList1), len(rowList2))
        self.assertEqual(rowList1, rowList2)
    def test10c_moveIndex(self):
        """Checking moving a table with an index (small node cache)."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test10c_moveIndex..." % self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        # A tiny node cache forces nodes to be evicted/reloaded during the
        # move, exercising a different code path than test10a.
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       node_cache_slots=10)
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before move")
            print("var1 column:", idxcol)
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        # Create a new group called "agroup"
        agroup = self.h5file.create_group("/", "agroup")
        # move the table to "agroup"
        table.move(agroup, "table2")
        if verbose:
            print("After move")
            print("var1 column:", idxcol)
        self.assertTrue(table.cols.var1.index is not None)
        self.assertEqual(table.colindexed["var1"], 1)
        # Some sanity checks
        table.flavor = "python"
        rowList1 = table.get_where_list('var1 < b"10"')
        rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
        if verbose:
            print("Selected values:", rowList1)
            print("Should look like:", rowList2)
        self.assertEqual(len(rowList1), len(rowList2))
        self.assertEqual(rowList1, rowList2)
    def test10d_moveIndex(self):
        """Checking moving a table with an index (no node cache)."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test10d_moveIndex..." % self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        # node_cache_slots=0 disables the node cache entirely, so every
        # node access goes to disk during the move.
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       node_cache_slots=0)
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before move")
            print("var1 column:", idxcol)
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        # Create a new group called "agroup"
        agroup = self.h5file.create_group("/", "agroup")
        # move the table to "agroup"
        table.move(agroup, "table2")
        if verbose:
            print("After move")
            print("var1 column:", idxcol)
        self.assertTrue(table.cols.var1.index is not None)
        self.assertEqual(table.colindexed["var1"], 1)
        # Some sanity checks
        table.flavor = "python"
        rowList1 = table.get_where_list('var1 < b"10"')
        rowList2 = [p.nrow for p in table if p['var1'] < b"10"]
        if verbose:
            print("Selected values:", rowList1)
            print("Should look like:", rowList2)
        self.assertEqual(len(rowList1), len(rowList2))
        self.assertEqual(rowList1, rowList2)
    def test11a_removeTableWithIndex(self):
        """Checking removing a table with indexes."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test11a_removeTableWithIndex..." %
                  self.__class__.__name__)
        # Open the HDF5 file in read-write mode
        self.h5file = tables.open_file(self.h5fname, mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before deletion")
            print("var1 column:", table.cols.var1)
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        # delete the table
        # Removing the table node must take its indexes with it.
        self.h5file.remove_node("/table")
        if verbose:
            print("After deletion")
        self.assertTrue("table" not in self.h5file.root)
        # re-create the table and the index again
        table = self.h5file.create_table("/", 'table', TDescr, "New table",
                                         self.filters, self.nrows)
        for i in range(self.nrows):
            table.row['var1'] = str(i)
            table.row['var2'] = i % 2
            table.row['var3'] = i
            # var4 runs in descending order, mirroring the setUp data.
            table.row['var4'] = float(self.nrows - i - 1)
            table.row.append()
        table.flush()
        # Index all entries:
        for col in six.itervalues(table.colinstances):
            indexrows = col.create_index(_blocksizes=small_blocksizes)
            self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        if verbose:
            print("After re-creation")
            print("var1 column:", table.cols.var1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(table.colindexed["var1"], 1)
    def test11b_removeTableWithIndex(self):
        """Checking removing a table with indexes (persistent version 2)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test11b_removeTableWithIndex..." %
                  self.__class__.__name__)
        self.h5file = tables.open_file(self.h5fname, mode="a")
        table = self.h5file.root.table
        idxcol = table.cols.var1.index
        if verbose:
            print("Before deletion")
            print("var1 column:", table.cols.var1)
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        # delete the table
        self.h5file.remove_node("/table")
        if verbose:
            print("After deletion")
        self.assertTrue("table" not in self.h5file.root)
        # close and reopen the file
        # Same as test11a, but with a reopen in between so the deletion is
        # verified against the on-disk state before re-creating the table.
        self._reopen(mode="r+")
        # re-create the table and the index again
        table = self.h5file.create_table("/", 'table', TDescr, "New table",
                                         self.filters, self.nrows)
        for i in range(self.nrows):
            table.row['var1'] = str(i)
            table.row['var2'] = i % 2
            table.row['var3'] = i
            table.row['var4'] = float(self.nrows - i - 1)
            table.row.append()
        table.flush()
        # Index all entries:
        for col in six.itervalues(table.colinstances):
            indexrows = col.create_index(_blocksizes=small_blocksizes)
            self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        if verbose:
            print("After re-creation")
            print("var1 column:", table.cols.var1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(table.colindexed["var1"], 1)
# Test provided by Andrew Straw
def test11c_removeTableWithIndex(self):
"""Checking removing a table with indexes (persistent version 3)"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test11c_removeTableWithIndex..." %
self.__class__.__name__)
class Distance(tables.IsDescription):
frame = Int32Col(pos=0)
distance = FloatCol(pos=1)
# Delete the old temporal file
os.remove(self.h5fname)
self.h5fname = tempfile.mktemp(".h5")
self.h5file = tables.open_file(self.h5fname, mode='w')
table = self.h5file.create_table(
self.h5file.root, 'distance_table', Distance)
table.cols.frame.create_index(_blocksizes=small_blocksizes)
r = table.row
for i in range(10):
r['frame'] = i
r['distance'] = float(i**2)
r.append()
table.flush()
self._reopen(mode='r+')
self.h5file.remove_node(self.h5file.root.distance_table)
    def test12_doubleIterate(self):
        """Checking that two simultaneous `where` iterators over the same
        table do not interfere with each other."""
        self.h5file = tables.open_file(self.h5fname, mode="r")
        table = self.h5file.root.table
        # Try several query limits, including both extremes.
        tests = [1, 4, self.nrows]
        if self.nrows > 500:
            tests.append(self.nrows - 500)
        for limit in tests:
            # Each handle is a [rows_seen, iterator] pair.
            handle_a = [0, table.where('(var3 < e)', dict(e=limit))]
            handle_b = [0, table.where('(var3 < e)', dict(e=limit))]
            try:
                # Exhaust handle_b first, while handle_a stays unconsumed.
                while True:
                    next(handle_b[1])
                    handle_b[0] += 1
            except StopIteration:
                # handle_a must still yield all of its rows, and the spent
                # handle_b must yield nothing more.
                for _ in handle_a[1]:
                    handle_a[0] += 1
                for _ in handle_b[1]:
                    handle_b[0] += 1
            self.assertEqual(handle_a[0], limit)
            self.assertEqual(handle_b[0], limit)
            # A fresh iterator still sees the full result set.
            self.assertEqual(
                len(list(table.where('(var3 < e)', dict(e=limit)))), limit)
# Slice size from the small block-size preset; used as the base row count
# for the parameterized read test cases below.
small_ss = small_blocksizes[2]
class BasicReadTestCase(BasicTestCase):
    # Baseline: no compression, no shuffle, no checksums.
    compress = 0
    complib = "zlib"
    shuffle = 0
    fletcher32 = 0
    nrows = small_ss
class ZlibReadTestCase(BasicTestCase):
    # zlib compression only.
    compress = 1
    complib = "zlib"
    shuffle = 0
    fletcher32 = 0
    nrows = small_ss
# Blosc compression only (skipped when the library is not compiled in).
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
class BloscReadTestCase(BasicTestCase):
    compress = 1
    complib = "blosc"
    shuffle = 0
    fletcher32 = 0
    nrows = small_ss
# LZO compression only (skipped when the library is not compiled in).
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class LZOReadTestCase(BasicTestCase):
    compress = 1
    complib = "lzo"
    shuffle = 0
    fletcher32 = 0
    nrows = small_ss
# bzip2 compression only (skipped when the library is not compiled in).
@unittest.skipIf(not common.bzip2_avail,
                 'BZIP2 compression library not available')
class Bzip2ReadTestCase(BasicTestCase):
    compress = 1
    complib = "bzip2"
    shuffle = 0
    fletcher32 = 0
    nrows = small_ss
class ShuffleReadTestCase(BasicTestCase):
    # zlib compression with the shuffle filter enabled.
    compress = 1
    complib = "zlib"
    shuffle = 1
    fletcher32 = 0
    nrows = small_ss
class Fletcher32ReadTestCase(BasicTestCase):
    # zlib compression with fletcher32 checksums enabled.
    compress = 1
    complib = "zlib"
    shuffle = 0
    fletcher32 = 1
    nrows = small_ss
class ShuffleFletcher32ReadTestCase(BasicTestCase):
    # zlib compression with both shuffle and fletcher32 enabled.
    compress = 1
    complib = "zlib"
    shuffle = 1
    fletcher32 = 1
    nrows = small_ss
class OneHalfTestCase(BasicTestCase):
    # One and a half slices: a partially filled last slice.
    nrows = small_ss + small_ss//2
class UpperBoundTestCase(BasicTestCase):
    # Exactly one row past a full slice.
    nrows = small_ss + 1
class LowerBoundTestCase(BasicTestCase):
    # One row short of two full slices.
    nrows = small_ss * 2-1
class DeepTableIndexTestCase(common.TempFileMixin, TestCase):
    """Indexes must work regardless of how deep the table sits in the
    object tree."""
    nrows = minRowIndex
    def test01(self):
        """Checking the indexing of a table in a 2nd level hierarchy"""
        # Create an instance of an HDF5 Table
        group = self.h5file.create_group(self.h5file.root, "agroup")
        # Create a table
        title = "This is the IndexArray title"
        table = self.h5file.create_table(group, 'table', TDescr, title,
                                         None, self.nrows)
        for i in range(self.nrows):
            # Fill rows with defaults
            table.row.append()
        table.flush()
        # Index some column
        indexrows = table.cols.var1.create_index()
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        # Some sanity checks
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(idxcol.nelements, self.nrows)
    def test01b(self):
        """Checking the indexing of a table in 2nd level
        (persistent version)"""
        # Create an instance of an HDF5 Table
        group = self.h5file.create_group(self.h5file.root, "agroup")
        # Create a table
        title = "This is the IndexArray title"
        table = self.h5file.create_table(group, 'table', TDescr, title,
                                         None, self.nrows)
        for i in range(self.nrows):
            # Fill rows with defaults
            table.row.append()
        table.flush()
        # Index some column
        indexrows = table.cols.var1.create_index()
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        # Close and re-open this file
        # The reopen checks that the index survives on disk.
        self._reopen(mode='a')
        table = self.h5file.root.agroup.table
        idxcol = table.cols.var1.index
        # Some sanity checks
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(idxcol.nelements, self.nrows)
    def test02(self):
        """Checking the indexing of a table in a 4th level hierarchy"""
        # Create an instance of an HDF5 Table
        group = self.h5file.create_group(self.h5file.root, "agroup")
        group = self.h5file.create_group(group, "agroup")
        group = self.h5file.create_group(group, "agroup")
        # Create a table
        title = "This is the IndexArray title"
        table = self.h5file.create_table(group, 'table', TDescr, title,
                                         None, self.nrows)
        for i in range(self.nrows):
            # Fill rows with defaults
            table.row.append()
        table.flush()
        # Index some column
        indexrows = table.cols.var1.create_index()
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        # Some sanity checks
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(idxcol.nelements, self.nrows)
    def test02b(self):
        """Checking the indexing of a table in a 4th level
        (persistent version)"""
        # Create an instance of an HDF5 Table
        group = self.h5file.create_group(self.h5file.root, "agroup")
        group = self.h5file.create_group(group, "agroup")
        group = self.h5file.create_group(group, "agroup")
        # Create a table
        title = "This is the IndexArray title"
        table = self.h5file.create_table(group, 'table', TDescr, title,
                                         None, self.nrows)
        for i in range(self.nrows):
            # Fill rows with defaults
            table.row.append()
        table.flush()
        # Index some column
        indexrows = table.cols.var1.create_index()
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        # Close and re-open this file
        self._reopen(mode='a')
        table = self.h5file.root.agroup.agroup.agroup.table
        idxcol = table.cols.var1.index
        # Some sanity checks
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(idxcol.nelements, self.nrows)
    def test03(self):
        """Checking the indexing of a table in a 100th level hierarchy"""
        # Create an instance of an HDF5 Table
        # Stress case: bury the table 100 groups deep.
        group = self.h5file.root
        for i in range(100):
            group = self.h5file.create_group(group, "agroup")
        # Create a table
        title = "This is the IndexArray title"
        table = self.h5file.create_table(group, 'table', TDescr, title,
                                         None, self.nrows)
        for i in range(self.nrows):
            # Fill rows with defaults
            table.row.append()
        table.flush()
        # Index some column
        indexrows = table.cols.var1.create_index()
        self.assertTrue(indexrows is not None)
        idxcol = table.cols.var1.index
        # Some sanity checks
        self.assertEqual(table.colindexed["var1"], 1)
        self.assertTrue(idxcol is not None)
        self.assertEqual(idxcol.nelements, self.nrows)
class IndexProps(object):
    """Bundle of index properties: the automatic-indexing flag and the
    filters used when (re)building indexes."""
    def __init__(self, auto=default_auto_index, filters=default_index_filters):
        self.auto = auto
        self.filters = filters
# Shared index-property presets consumed by the parameterized test cases.
DefaultProps = IndexProps()
NoAutoProps = IndexProps(auto=False)
ChangeFiltersProps = IndexProps(
    filters=tables.Filters(complevel=6, complib="zlib",
                           shuffle=False, fletcher32=False))
class AutomaticIndexingTestCase(common.TempFileMixin, TestCase):
    # Whether setUp closes and reopens the file after populating it.
    reopen = 1
    # IndexProps preset under test (DefaultProps/NoAutoProps/...).
    iprops = NoAutoProps
    # Columns that setUp indexes; var4 is deliberately left unindexed.
    colsToIndex = ['var1', 'var2', 'var3']
    small_blocksizes = (16, 8, 4, 2)
    def setUp(self):
        """Build a table with var1-var3 indexed and autoindex taken from
        the class's IndexProps preset; optionally reopen the file."""
        super(AutomaticIndexingTestCase, self).setUp()
        # Create an instance of an HDF5 Table
        title = "This is the IndexArray title"
        root = self.h5file.root
        # Make the chunkshape smaller or equal than small_blocksizes[-1]
        chunkshape = (2,)
        self.table = self.h5file.create_table(root, 'table', TDescr, title,
                                              None, self.nrows,
                                              chunkshape=chunkshape)
        self.table.autoindex = self.iprops.auto
        # Indexes are created *before* the rows are appended, so the
        # autoindex policy governs how the appended rows get indexed.
        for colname in self.colsToIndex:
            self.table.colinstances[colname].create_index(
                _blocksizes=self.small_blocksizes)
        for i in range(self.nrows):
            # Fill rows with defaults
            self.table.row.append()
        self.table.flush()
        if self.reopen:
            self._reopen(mode='a')
            self.table = self.h5file.root.table
    def test01_attrs(self):
        """Checking indexing attributes (part1)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_attrs..." % self.__class__.__name__)
        table = self.table
        # With DefaultProps nothing is expected to be indexed at all.
        if self.iprops is DefaultProps:
            self.assertEqual(table.indexed, 0)
        else:
            self.assertEqual(table.indexed, 1)
        if self.iprops is DefaultProps:
            self.assertEqual(table.colindexed["var1"], 0)
            self.assertTrue(table.cols.var1.index is None)
            self.assertEqual(table.colindexed["var2"], 0)
            self.assertTrue(table.cols.var2.index is None)
            self.assertEqual(table.colindexed["var3"], 0)
            self.assertTrue(table.cols.var3.index is None)
            self.assertEqual(table.colindexed["var4"], 0)
            self.assertTrue(table.cols.var4.index is None)
        else:
            # Check that the var1, var2 and var3 (and only these)
            # has been indexed
            self.assertEqual(table.colindexed["var1"], 1)
            self.assertTrue(table.cols.var1.index is not None)
            self.assertEqual(table.colindexed["var2"], 1)
            self.assertTrue(table.cols.var2.index is not None)
            self.assertEqual(table.colindexed["var3"], 1)
            self.assertTrue(table.cols.var3.index is not None)
            self.assertEqual(table.colindexed["var4"], 0)
            self.assertTrue(table.cols.var4.index is None)
    def test02_attrs(self):
        """Checking indexing attributes (part2)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_attrs..." % self.__class__.__name__)
        table = self.table
        # Check the policy parameters
        if verbose:
            if table.indexed:
                print("index props:", table.autoindex)
            else:
                print("Table is not indexed")
        # Check non-default values for index saving policy
        if self.iprops is NoAutoProps:
            self.assertFalse(table.autoindex)
        elif self.iprops is ChangeFiltersProps:
            self.assertTrue(table.autoindex)
        # Check Index() objects exists and are properly placed
        if self.iprops is DefaultProps:
            self.assertEqual(table.cols.var1.index, None)
            self.assertEqual(table.cols.var2.index, None)
            self.assertEqual(table.cols.var3.index, None)
            self.assertEqual(table.cols.var4.index, None)
        else:
            # var4 is not in colsToIndex, so it must stay unindexed.
            self.assertTrue(isinstance(table.cols.var1.index, Index))
            self.assertTrue(isinstance(table.cols.var2.index, Index))
            self.assertTrue(isinstance(table.cols.var3.index, Index))
            self.assertEqual(table.cols.var4.index, None)
def test03_counters(self):
"""Checking indexing counters"""
if verbose:
print('\n', '-=' * 30)
print("Running %s.test03_counters..." % self.__class__.__name__)
table = self.table
# Check the counters for indexes
if verbose:
if table.indexed:
print("indexedrows:", table._indexedrows)
print("unsavedindexedrows:", table._unsaved_indexedrows)
index = table.cols.var1.index
print("table rows:", table.nrows)
print("computed indexed rows:", index.nrows * index.slicesize)
else:
print("Table is not indexed")
if self.iprops is not DefaultProps:
index = table.cols.var1.index
indexedrows = index.nelements
self.assertEqual(table._indexedrows, indexedrows)
indexedrows = index.nelements
self.assertEqual(table._unsaved_indexedrows,
self.nrows - indexedrows)
    def test04_noauto(self):
        """Checking indexing counters (non-automatic mode)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test04_noauto..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        # flush_rows_to_index() pushes any pending rows into the indexes
        # even when autoindex is off.
        table.flush_rows_to_index()
        # Check the counters for indexes
        if verbose:
            if table.indexed:
                print("indexedrows:", table._indexedrows)
                print("unsavedindexedrows:", table._unsaved_indexedrows)
                index = table.cols.var1.index
                print("computed indexed rows:", index.nelements)
            else:
                print("Table is not indexed")
        # No unindexated rows should remain
        index = table.cols.var1.index
        if self.iprops is DefaultProps:
            self.assertTrue(index is None)
        else:
            indexedrows = index.nelements
            self.assertEqual(table._indexedrows, index.nelements)
            self.assertEqual(table._unsaved_indexedrows,
                             self.nrows - indexedrows)
        # Check non-default values for index saving policy
        if self.iprops is NoAutoProps:
            self.assertFalse(table.autoindex)
        elif self.iprops is ChangeFiltersProps:
            self.assertTrue(table.autoindex)
    def test05_icounters(self):
        """Checking indexing counters (remove_rows)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test05_icounters..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # Non indexated rows should remain here
        # NOTE(review): these locals are only bound when iprops is not
        # DefaultProps; the verbose branch below assumes table.indexed
        # implies they exist — confirm.
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            unsavedindexedrows = table._unsaved_indexedrows
        # Now, remove some rows:
        table.remove_rows(2, 4)
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
        # Check the counters for indexes
        if verbose:
            if table.indexed:
                print("indexedrows:", table._indexedrows)
                print("original indexedrows:", indexedrows)
                print("unsavedindexedrows:", table._unsaved_indexedrows)
                print("original unsavedindexedrows:", unsavedindexedrows)
                # index = table.cols.var1.index
                print("index dirty:", table.cols.var1.index.dirty)
            else:
                print("Table is not indexed")
        # Check the counters
        self.assertEqual(table.nrows, self.nrows - 2)
        # Without automatic re-indexing, removing rows must mark the
        # index as dirty.
        if self.iprops is NoAutoProps:
            self.assertTrue(table.cols.var1.index.dirty)
        # Check non-default values for index saving policy
        if self.iprops is NoAutoProps:
            self.assertFalse(table.autoindex)
        elif self.iprops is ChangeFiltersProps:
            self.assertTrue(table.autoindex)
    def test06_dirty(self):
        """Checking dirty flags (remove_rows action)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test06_dirty..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # Now, remove some rows:
        table.remove_rows(3, 5)
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
        # Check the dirty flag for indexes
        if verbose:
            print("auto flag:", table.autoindex)
            for colname in table.colnames:
                if table.cols._f_col(colname).index:
                    print("dirty flag col %s: %s" %
                          (colname, table.cols._f_col(colname).index.dirty))
        # Check the flags
        # Without autoindex, removing rows must leave every index dirty;
        # with autoindex, the indexes are refreshed and stay clean.
        for colname in table.colnames:
            if table.cols._f_col(colname).index:
                if not table.autoindex:
                    self.assertEqual(table.cols._f_col(colname).index.dirty,
                                     True)
                else:
                    self.assertEqual(table.cols._f_col(colname).index.dirty,
                                     False)
    def test07_noauto(self):
        """Checking indexing counters (modify_rows, no-auto mode)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test07_noauto..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # No unindexated rows should remain here
        # NOTE(review): these locals are only bound when iprops is not
        # DefaultProps; the verbose branch below assumes table.indexed
        # implies they exist — confirm.
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            unsavedindexedrows = table._unsaved_indexedrows
        # Now, modify just one row:
        table.modify_rows(3, None, 1, [["asa", 0, 3, 3.1]])
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
        # Check the counters for indexes
        if verbose:
            if table.indexed:
                print("indexedrows:", table._indexedrows)
                print("original indexedrows:", indexedrows)
                print("unsavedindexedrows:", table._unsaved_indexedrows)
                print("original unsavedindexedrows:", unsavedindexedrows)
                index = table.cols.var1.index
                print("computed indexed rows:", index.nelements)
            else:
                print("Table is not indexed")
        # Check the counters
        self.assertEqual(table.nrows, self.nrows)
        # Modifying a row must dirty the index when autoindex is off.
        if self.iprops is NoAutoProps:
            self.assertTrue(table.cols.var1.index.dirty)
        # Check the dirty flag for indexes
        if verbose:
            for colname in table.colnames:
                if table.cols._f_col(colname).index:
                    print("dirty flag col %s: %s" %
                          (colname, table.cols._f_col(colname).index.dirty))
        for colname in table.colnames:
            if table.cols._f_col(colname).index:
                if not table.autoindex:
                    self.assertEqual(table.cols._f_col(colname).index.dirty,
                                     True)
                else:
                    self.assertEqual(table.cols._f_col(colname).index.dirty,
                                     False)
    def test07b_noauto(self):
        """Checking indexing queries (modify in iterator, no-auto mode)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test07b_noauto..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # Do a query that uses indexes
        res = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
        # Now, modify just one row:
        # Row 3 is rewritten in-place so that it newly matches the query.
        for row in table:
            if row.nrow == 3:
                row['var1'] = "asa"
                row['var2'] = True
                row['var3'] = 3
                row['var4'] = 3.1
                row.update()
        table.flush()
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
        # Do a query that uses indexes
        # Even with a (possibly dirty) index, the query must see the
        # modified row: the original hits plus row 3.
        resq = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
        res_ = res + [3]
        if verbose:
            print("AutoIndex?:", table.autoindex)
            print("Query results (original):", res)
            print("Query results (after modifying table):", resq)
            print("Should look like:", res_)
        self.assertEqual(res_, resq)
    def test07c_noauto(self):
        """Checking indexing queries (append, no-auto mode)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test07c_noauto..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # Do a query that uses indexes
        res = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
        # Now, append three rows
        # All three new rows match the query condition.
        table.append([("asa", True, 1, 3.1)])
        table.append([("asb", True, 2, 3.1)])
        table.append([("asc", True, 3, 3.1)])
        table.flush()
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
        # Do a query that uses indexes
        # The query must also see the appended (not yet indexed) rows.
        resq = [row.nrow for row in table.where('(var2 == True) & (var3 > 0)')]
        res_ = res + [table.nrows-3, table.nrows-2, table.nrows-1]
        if verbose:
            print("AutoIndex?:", table.autoindex)
            print("Query results (original):", res)
            print("Query results (after modifying table):", resq)
            print("Should look like:", res_)
        self.assertEqual(res_, resq)
    def test08_dirty(self):
        """Checking dirty flags (modify_columns)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test08_dirty..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # Non indexated rows should remain here
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            self.assertTrue(indexedrows is not None)
            unsavedindexedrows = table._unsaved_indexedrows
            self.assertTrue(unsavedindexedrows is not None)
        # Now, modify a couple of rows:
        table.modify_columns(1, columns=[["asa", "asb"], [1., 2.]],
                             names=["var1", "var4"])
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
        # Check the counters
        self.assertEqual(table.nrows, self.nrows)
        if self.iprops is NoAutoProps:
            self.assertTrue(table.cols.var1.index.dirty)
        # Check the dirty flag for indexes
        if verbose:
            for colname in table.colnames:
                if table.cols._f_col(colname).index:
                    print("dirty flag col %s: %s" %
                          (colname, table.cols._f_col(colname).index.dirty))
        # Only var1 was both modified and indexed (var4 has no index), so
        # in no-auto mode it is the only index that must turn dirty.
        for colname in table.colnames:
            if table.cols._f_col(colname).index:
                if not table.autoindex:
                    if colname in ["var1"]:
                        self.assertEqual(
                            table.cols._f_col(colname).index.dirty, True)
                    else:
                        self.assertEqual(
                            table.cols._f_col(colname).index.dirty, False)
                else:
                    self.assertEqual(table.cols._f_col(colname).index.dirty,
                                     False)
    def test09a_propIndex(self):
        """Checking propagate Index feature in Table.copy() (attrs)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test09a_propIndex..." % self.__class__.__name__)
        table = self.table
        # Don't force a sync in indexes
        # table.flush_rows_to_index()
        # Non indexated rows should remain here
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            self.assertTrue(indexedrows is not None)
            unsavedindexedrows = table._unsaved_indexedrows
            self.assertTrue(unsavedindexedrows is not None)
        # Now, remove some rows to make columns dirty
        # table.remove_rows(3,5)
        # Copy a Table to another location
        # propindexes=True asks copy() to rebuild the indexes on the copy.
        table2 = table.copy("/", 'table2', propindexes=True)
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
            table2 = self.h5file.root.table2
        index1 = table.cols.var1.index
        index2 = table2.cols.var1.index
        if verbose:
            print("Copied index:", index2)
            print("Original index:", index1)
            if index1:
                print("Elements in copied index:", index2.nelements)
                print("Elements in original index:", index1.nelements)
        # Check the counters
        self.assertEqual(table.nrows, table2.nrows)
        if table.indexed:
            self.assertTrue(table2.indexed)
        if self.iprops is DefaultProps:
            # No index: the index should not exist
            self.assertTrue(index1 is None)
            self.assertTrue(index2 is None)
        elif self.iprops is NoAutoProps:
            self.assertTrue(index2 is not None)
            # Check the dirty flag for indexes
            # Indexes propagated by copy() are freshly built, hence clean.
            if verbose:
                for colname in table2.colnames:
                    if table2.cols._f_col(colname).index:
                        print("dirty flag col %s: %s" %
                              (colname, table2.cols._f_col(colname).index.dirty))
            for colname in table2.colnames:
                if table2.cols._f_col(colname).index:
                    self.assertEqual(table2.cols._f_col(colname).index.dirty,
                                     False)
    def test09b_propIndex(self):
        """Checking that propindexes=False works"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test09b_propIndex..." % self.__class__.__name__)
        table = self.table
        # Don't force a sync in indexes
        # table.flush_rows_to_index()
        # Non indexated rows should remain here
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            self.assertTrue(indexedrows is not None)
            unsavedindexedrows = table._unsaved_indexedrows
            self.assertTrue(unsavedindexedrows is not None)
        # Now, remove some rows to make columns dirty
        # table.remove_rows(3,5)
        # Copy a Table to another location, explicitly NOT copying indexes
        table2 = table.copy("/", 'table2', propindexes=False)
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
            table2 = self.h5file.root.table2
        if verbose:
            print("autoindex?:", self.iprops.auto)
            print("Copied index indexed?:", table2.cols.var1.is_indexed)
            print("Original index indexed?:", table.cols.var1.is_indexed)
        if self.iprops is DefaultProps:
            # No index: the index should not exist
            self.assertFalse(table2.cols.var1.is_indexed)
            self.assertFalse(table.cols.var1.is_indexed)
        elif self.iprops is NoAutoProps:
            # Original keeps its index; the copy must not have one
            self.assertFalse(table2.cols.var1.is_indexed)
            self.assertTrue(table.cols.var1.is_indexed)
    def test10_propIndex(self):
        """Checking propagate Index feature in Table.copy() (values)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test10_propIndex..." % self.__class__.__name__)
        table = self.table
        # Don't force a sync in indexes
        # table.flush_rows_to_index()
        # Non indexated rows should remain here
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            self.assertTrue(indexedrows is not None)
            unsavedindexedrows = table._unsaved_indexedrows
            self.assertTrue(unsavedindexedrows is not None)
        # Now, remove some rows to make columns dirty
        # table.remove_rows(3,5)
        # Copy a Table to another location
        table2 = table.copy("/", 'table2', propindexes=True)
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
            table2 = self.h5file.root.table2
        index1 = table.cols.var3.index
        index2 = table2.cols.var3.index
        # NOTE(review): beyond the setUp-state asserts above, this test only
        # prints — it never asserts on index2; effectively a smoke test that
        # the copy with propindexes=True completes.  Consider asserting.
        if verbose:
            print("Copied index:", index2)
            print("Original index:", index1)
            if index1:
                print("Elements in copied index:", index2.nelements)
                print("Elements in original index:", index1.nelements)
    def test11_propIndex(self):
        """Checking propagate Index feature in Table.copy() (dirty flags)"""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test11_propIndex..." % self.__class__.__name__)
        table = self.table
        # Force a sync in indexes
        table.flush_rows_to_index()
        # Non indexated rows should remain here
        if self.iprops is not DefaultProps:
            indexedrows = table._indexedrows
            self.assertTrue(indexedrows is not None)
            unsavedindexedrows = table._unsaved_indexedrows
            self.assertTrue(unsavedindexedrows is not None)
        # Now, modify an indexed column and an unindexed one
        # to make the "var1" dirty
        table.modify_columns(1, columns=[["asa", "asb"], [1., 2.]],
                             names=["var1", "var4"])
        # Copy a Table to another location
        table2 = table.copy("/", 'table2', propindexes=True)
        if self.reopen:
            self._reopen(mode='a')
            table = self.h5file.root.table
            table2 = self.h5file.root.table2
        index1 = table.cols.var1.index
        index2 = table2.cols.var1.index
        if verbose:
            print("Copied index:", index2)
            print("Original index:", index1)
            if index1:
                print("Elements in copied index:", index2.nelements)
                print("Elements in original index:", index1.nelements)
        # Check the dirty flag for indexes
        if verbose:
            for colname in table2.colnames:
                if table2.cols._f_col(colname).index:
                    print("dirty flag col %s: %s" %
                          (colname, table2.cols._f_col(colname).index.dirty))
        # NOTE(review): only the autoindex=True case is asserted; when
        # table2.autoindex is false no check is made — confirm intentional.
        for colname in table2.colnames:
            if table2.cols._f_col(colname).index:
                if table2.autoindex:
                    # All the destination columns should be non-dirty because
                    # the copy removes the dirty state and puts the
                    # index in a sane state
                    self.assertEqual(table2.cols._f_col(colname).index.dirty,
                                     False)
# minRowIndex = 10000 # just if one wants more indexed rows to be checked
class AI1TestCase(AutomaticIndexingTestCase):
    # Parameterization: small table, no file reopen, no auto-indexing.
    # nrows = 10002
    nrows = 102
    reopen = 0
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI2TestCase(AutomaticIndexingTestCase):
    # Same as AI1 but reopening the file between operations.
    # nrows = 10002
    nrows = 102
    reopen = 1
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI4bTestCase(AutomaticIndexingTestCase):
    # Slightly larger row count than AI2, with reopen.
    # nrows = 10012
    nrows = 112
    reopen = 1
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI5TestCase(AutomaticIndexingTestCase):
    # Row count derived from the slice size (ss) so it straddles an
    # index-slice boundary by one row (ss*11 - 1).
    sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
    nrows = ss * 11-1
    reopen = 0
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI6TestCase(AutomaticIndexingTestCase):
    # One row past a slice boundary (ss*21 + 1), with reopen.
    sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
    nrows = ss * 21 + 1
    reopen = 1
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI7TestCase(AutomaticIndexingTestCase):
    # One row short of a slice boundary (ss*12 - 1), no reopen.
    sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
    nrows = ss * 12-1
    # nrows = ss * 1-1 # faster test
    reopen = 0
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI8TestCase(AutomaticIndexingTestCase):
    # 100 rows past a slice boundary, with reopen.
    sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
    nrows = ss * 15 + 100
    # nrows = ss * 1 + 100 # faster test
    reopen = 1
    iprops = NoAutoProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI9TestCase(AutomaticIndexingTestCase):
    # Default index properties and no explicitly indexed columns.
    sbs, bs, ss, cs = calc_chunksize(minRowIndex, memlevel=1)
    nrows = ss
    reopen = 0
    iprops = DefaultProps
    colsToIndex = []
class AI10TestCase(AutomaticIndexingTestCase):
    # Default properties, no indexed columns, with reopen.
    # nrows = 10002
    nrows = 102
    reopen = 1
    iprops = DefaultProps
    colsToIndex = []
class AI11TestCase(AutomaticIndexingTestCase):
    # ChangeFiltersProps variant, no reopen.
    # nrows = 10002
    nrows = 102
    reopen = 0
    iprops = ChangeFiltersProps
    colsToIndex = ['var1', 'var2', 'var3']
class AI12TestCase(AutomaticIndexingTestCase):
    # NOTE(review): parameter-for-parameter identical to AI11 (including
    # reopen = 0); the AIn pairs elsewhere alternate reopen 0/1, so this
    # may have been intended as reopen = 1 — confirm before changing.
    # nrows = 10002
    nrows = 102
    reopen = 0
    iprops = ChangeFiltersProps
    colsToIndex = ['var1', 'var2', 'var3']
class ManyNodesTestCase(common.TempFileMixin, TestCase):
    # BUGFIX: this attribute was misspelled "opem_kwargs", so TempFileMixin
    # never passed node_cache_slots=64 to tables.open_file() and the test
    # silently ran with the default node cache size.
    open_kwargs = dict(node_cache_slots=64)

    def test00(self):
        """Indexing many nodes in one single session (based on bug #26)"""
        IdxRecord = {
            'f0': Int8Col(),
            'f1': Int8Col(),
            'f2': Int8Col(),
        }
        # Create a 5x5 grid of tables, indexing every column of each, to
        # exercise index creation while nodes are being evicted from the
        # (small) node cache.
        for qn in range(5):
            for sn in range(5):
                qchr = 'chr' + str(qn)
                name = 'chr' + str(sn)
                path = "/at/%s/pt" % (qchr)
                table = self.h5file.create_table(
                    path, name, IdxRecord, createparents=1)
                table.cols.f0.create_index()
                table.cols.f1.create_index()
                table.cols.f2.create_index()
                table.row.append()
                table.flush()
class IndexPropsChangeTestCase(TempFileMixin, TestCase):
    """Test case for changing index properties in a table."""
    class MyDescription(tables.IsDescription):
        icol = IntCol()
    # Reference property sets: default vs. auto=False with heavy compression.
    oldIndexProps = IndexProps()
    newIndexProps = IndexProps(auto=False, filters=tables.Filters(complevel=9))
    def setUp(self):
        super(IndexPropsChangeTestCase, self).setUp()
        table = self.h5file.create_table('/', 'test', self.MyDescription)
        table.autoindex = self.oldIndexProps.auto
        row = table.row
        for i in range(100):
            row['icol'] = i % 25
            row.append()
        table.flush()
        self.table = table
    def test_attributes(self):
        """Storing index properties as table attributes."""
        # First iteration checks the old value, then switches to the new
        # one; second iteration verifies the switch took effect.
        for refprops in [self.oldIndexProps, self.newIndexProps]:
            self.assertEqual(self.table.autoindex, refprops.auto)
            self.table.autoindex = self.newIndexProps.auto
    def test_copyattrs(self):
        """Copying index properties attributes."""
        oldtable = self.table
        newtable = oldtable.copy('/', 'test2')
        self.assertEqual(oldtable.autoindex, newtable.autoindex)
class IndexFiltersTestCase(TempFileMixin, TestCase):
    """Test case for setting index filters."""
    def setUp(self):
        super(IndexFiltersTestCase, self).setUp()
        description = {'icol': IntCol()}
        self.table = self.h5file.create_table('/', 'test', description)
    def test_createIndex(self):
        """Checking input parameters in new indexes."""
        # Different from default.
        argfilters = copy.copy(default_index_filters)
        argfilters.shuffle = not default_index_filters.shuffle
        # Different both from default and the previous one.
        # NOTE(review): idxfilters is built but never used below — dead
        # setup, possibly a leftover from a removed third create.
        idxfilters = copy.copy(default_index_filters)
        idxfilters.shuffle = not default_index_filters.shuffle
        idxfilters.fletcher32 = not default_index_filters.fletcher32
        icol = self.table.cols.icol
        # First create
        icol.create_index(kind='ultralight', optlevel=4)
        self.assertEqual(icol.index.kind, 'ultralight')
        self.assertEqual(icol.index.optlevel, 4)
        self.assertEqual(icol.index.filters, default_index_filters)
        icol.remove_index()
        # Second create
        icol.create_index(kind='medium', optlevel=3, filters=argfilters)
        self.assertEqual(icol.index.kind, 'medium')
        self.assertEqual(icol.index.optlevel, 3)
        self.assertEqual(icol.index.filters, argfilters)
        icol.remove_index()
    def test_reindex(self):
        """Checking input parameters in recomputed indexes."""
        icol = self.table.cols.icol
        icol.create_index(
            kind='full', optlevel=5, filters=tables.Filters(complevel=3))
        kind = icol.index.kind
        optlevel = icol.index.optlevel
        filters = icol.index.filters
        # Re-indexing must preserve kind/optlevel/filters of the old index
        icol.reindex()
        ni = icol.index
        if verbose:
            print("Old parameters: %s, %s, %s" % (kind, optlevel, filters))
            print("New parameters: %s, %s, %s" % (
                ni.kind, ni.optlevel, ni.filters))
        self.assertEqual(ni.kind, kind)
        self.assertEqual(ni.optlevel, optlevel)
        self.assertEqual(ni.filters, filters)
class OldIndexTestCase(common.TestFileMixin, TestCase):
    # Pre-built file carrying a PyTables 1.x-format index.
    h5fname = test_filename("idx-std-1.x.h5")
    def test1_x(self):
        """Check that files with 1.x indexes are recognized and warned."""
        self.assertWarns(OldIndexWarning, self.h5file.get_node, "/table")
# Sensible parameters for indexing with small blocksizes
# (superblocksize, blocksize, slicesize, chunksize) — same ordering as the
# sbs/bs/ss/cs tuple returned by calc_chunksize above.
small_blocksizes = (512, 128, 32, 8)
class CompletelySortedIndexTestCase(TempFileMixin, TestCase):
    """Test case for testing a complete sort in a table."""
    # setUp builds a 100-row table where icol counts *down* (nrows - i),
    # then creates a completely sorted index (CSI) on icol.  The tests
    # compare index/table sorted reads against numpy.sort/argsort ground
    # truth.
    nrows = 100
    nrowsinbuf = 11
    class MyDescription(tables.IsDescription):
        rcol = IntCol(pos=1)
        icol = IntCol(pos=2)
    def setUp(self):
        super(CompletelySortedIndexTestCase, self).setUp()
        table = self.h5file.create_table('/', 'table', self.MyDescription)
        row = table.row
        nrows = self.nrows
        for i in range(nrows):
            row['rcol'] = i
            row['icol'] = nrows - i
            row.append()
        table.flush()
        self.table = table
        self.icol = self.table.cols.icol
        # A full index with maximum optlevel should always be completely sorted
        self.icol.create_csindex(_blocksizes=small_blocksizes)
    def test00_isCompletelySortedIndex(self):
        """Testing the Column.is_csi property."""
        icol = self.icol
        self.assertEqual(icol.index.is_csi, True)
        icol.remove_index()
        # Other kinds than full, should never return a CSI
        icol.create_index(kind="medium", optlevel=9)
        self.assertEqual(icol.index.is_csi, False)
        icol.remove_index()
        # As the table is small, lesser optlevels should be able to
        # create a completely sorted index too.
        icol.create_index(kind="full", optlevel=6)
        self.assertEqual(icol.index.is_csi, True)
        # Checking a CSI in a sorted copy
        self.table.copy("/", 'table2', sortby='icol', checkCSI=True)
        self.assertEqual(icol.index.is_csi, True)
    def test01_readSorted1(self):
        """Testing the Index.read_sorted() method with no arguments."""
        icol = self.icol
        sortedcol = numpy.sort(icol[:])
        sortedcol2 = icol.index.read_sorted()
        if verbose:
            print("Original sorted column:", sortedcol)
            print("The values from the index:", sortedcol2)
        self.assertTrue(allequal(sortedcol, sortedcol2))
    def test01_readSorted2(self):
        """Testing the Index.read_sorted() method with arguments (I)."""
        icol = self.icol
        sortedcol = numpy.sort(icol[:])[30:55]
        sortedcol2 = icol.index.read_sorted(30, 55)
        if verbose:
            print("Original sorted column:", sortedcol)
            print("The values from the index:", sortedcol2)
        self.assertTrue(allequal(sortedcol, sortedcol2))
    def test01_readSorted3(self):
        """Testing the Index.read_sorted() method with arguments (II)."""
        icol = self.icol
        sortedcol = numpy.sort(icol[:])[33:97]
        sortedcol2 = icol.index.read_sorted(33, 97)
        if verbose:
            print("Original sorted column:", sortedcol)
            print("The values from the index:", sortedcol2)
        self.assertTrue(allequal(sortedcol, sortedcol2))
    def test02_readIndices1(self):
        """Testing the Index.read_indices() method with no arguments."""
        icol = self.icol
        # read_indices() returns uint64 positions, hence the astype
        indicescol = numpy.argsort(icol[:]).astype('uint64')
        indicescol2 = icol.index.read_indices()
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test02_readIndices2(self):
        """Testing the Index.read_indices() method with arguments (I)."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[30:55].astype('uint64')
        indicescol2 = icol.index.read_indices(30, 55)
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test02_readIndices3(self):
        """Testing the Index.read_indices() method with arguments (II)."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[33:97].astype('uint64')
        indicescol2 = icol.index.read_indices(33, 97)
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test02_readIndices4(self):
        """Testing the Index.read_indices() method with arguments (III)."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[33:97:2].astype('uint64')
        indicescol2 = icol.index.read_indices(33, 97, 2)
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test02_readIndices5(self):
        """Testing the Index.read_indices() method with arguments (IV)."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[33:55:5].astype('uint64')
        indicescol2 = icol.index.read_indices(33, 55, 5)
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test02_readIndices6(self):
        """Testing the Index.read_indices() method with step only."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[::3].astype('uint64')
        indicescol2 = icol.index.read_indices(step=3)
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test03_getitem1(self):
        """Testing the Index.__getitem__() method with no arguments."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:]).astype('uint64')
        indicescol2 = icol.index[:]
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test03_getitem2(self):
        """Testing the Index.__getitem__() method with start."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[31].astype('uint64')
        indicescol2 = icol.index[31]
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test03_getitem3(self):
        """Testing the Index.__getitem__() method with start, stop."""
        icol = self.icol
        indicescol = numpy.argsort(icol[:])[2:16].astype('uint64')
        indicescol2 = icol.index[2:16]
        if verbose:
            print("Original indices column:", indicescol)
            print("The values from the index:", indicescol2)
        self.assertTrue(allequal(indicescol, indicescol2))
    def test04_itersorted1(self):
        """Testing the Table.itersorted() method with no arguments."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol')], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted2(self):
        """Testing the Table.itersorted() method with a start."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[15:]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', start=15)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted3(self):
        """Testing the Table.itersorted() method with a stop."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[:20]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', stop=20)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted4(self):
        """Testing the Table.itersorted() method with a start and stop."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[15:20]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', start=15, stop=20)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted5(self):
        """Testing the Table.itersorted() method with a start, stop and
        step."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[15:45:4]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', start=15, stop=45, step=4)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted6(self):
        """Testing the Table.itersorted() method with a start, stop and
        step."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[33:55:5]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', start=33, stop=55, step=5)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted7(self):
        """Testing the Table.itersorted() method with checkCSI=True."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', checkCSI=True)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted8(self):
        """Testing the Table.itersorted() method with a start, stop and
        negative step."""
        # see also gh-252
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[55:33:-5]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', start=55, stop=33, step=-5)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test04_itersorted9(self):
        """Testing the Table.itersorted() method with a negative step."""
        # see also gh-252
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[::-5]
        sortedtable2 = numpy.array(
            [row.fetch_all_fields() for row in table.itersorted(
                'icol', step=-5)], dtype=table._v_dtype)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from the iterator:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted1(self):
        """Testing the Table.read_sorted() method with no arguments."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = table.read_sorted('icol')
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted2(self):
        """Testing the Table.read_sorted() method with a start."""
        table = self.table
        # a bare start returns a single row, hence the [16:17] slice
        sortedtable = numpy.sort(table[:], order='icol')[16:17]
        sortedtable2 = table.read_sorted('icol', start=16)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted3(self):
        """Testing the Table.read_sorted() method with a start and stop."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[16:33]
        sortedtable2 = table.read_sorted('icol', start=16, stop=33)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted4(self):
        """Testing the Table.read_sorted() method with a start, stop and
        step."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[33:55:5]
        sortedtable2 = table.read_sorted('icol', start=33, stop=55, step=5)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted5(self):
        """Testing the Table.read_sorted() method with only a step."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[::3]
        sortedtable2 = table.read_sorted('icol', step=3)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted6(self):
        """Testing the Table.read_sorted() method with negative step."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[::-1]
        sortedtable2 = table.read_sorted('icol', step=-1)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted7(self):
        """Testing the Table.read_sorted() method with negative step (II)."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')[::-2]
        sortedtable2 = table.read_sorted('icol', step=-2)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted8(self):
        """Testing the Table.read_sorted() method with negative step (III))."""
        table = self.table
        # With a negative step, start/stop count from the sorted end;
        # mirror them for the numpy ground-truth slice.
        sstart = 100-24-1
        sstop = 100-54-1
        sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-1]
        sortedtable2 = table.read_sorted('icol', start=24, stop=54, step=-1)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted9(self):
        """Testing the Table.read_sorted() method with negative step (IV))."""
        table = self.table
        sstart = 100-14-1
        sstop = 100-54-1
        sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-3]
        sortedtable2 = table.read_sorted('icol', start=14, stop=54, step=-3)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted10(self):
        """Testing the Table.read_sorted() method with negative step (V))."""
        table = self.table
        sstart = 100-24-1
        sstop = 100-25-1
        sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-2]
        sortedtable2 = table.read_sorted('icol', start=24, stop=25, step=-2)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05_readSorted11(self):
        """Testing the Table.read_sorted() method with start > stop."""
        table = self.table
        sstart = 100-137-1
        sstop = 100-25-1
        sortedtable = numpy.sort(table[:], order='icol')[sstart:sstop:-2]
        sortedtable2 = table.read_sorted('icol', start=137, stop=25, step=-2)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05a_readSorted12(self):
        """Testing the Table.read_sorted() method with checkCSI (I)."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = table.read_sorted('icol', checkCSI=True)
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test05b_readSorted12(self):
        """Testing the Table.read_sorted() method with checkCSI (II)."""
        table = self.table
        # 'rcol' has no index at all, so read_sorted must raise
        self.assertRaises(ValueError,
                          table.read_sorted, "rcol", checkCSI=False)
    def test06_copy_sorted1(self):
        """Testing the Table.copy(sortby) method with no arguments."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol")
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = table2[:]
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test06_copy_sorted2(self):
        """Testing the Table.copy(sortby) method with step=-1."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol", step=-1)
        sortedtable = numpy.sort(table[:], order='icol')[::-1]
        sortedtable2 = table2[:]
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test06_copy_sorted3(self):
        """Testing the Table.copy(sortby) method with only a start."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol", start=3)
        sortedtable = numpy.sort(table[:], order='icol')[3:4]
        sortedtable2 = table2[:]
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test06_copy_sorted4(self):
        """Testing the Table.copy(sortby) method with start, stop."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol", start=3, stop=40)
        sortedtable = numpy.sort(table[:], order='icol')[3:40]
        sortedtable2 = table2[:]
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test06_copy_sorted5(self):
        """Testing the Table.copy(sortby) method with start, stop, step."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol",
                            start=3, stop=33, step=5)
        sortedtable = numpy.sort(table[:], order='icol')[3:33:5]
        sortedtable2 = table2[:]
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test06_copy_sorted6(self):
        """Testing the Table.copy(sortby) method after table re-opening."""
        self._reopen(mode='a')
        table = self.h5file.root.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol")
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = table2[:]
        if verbose:
            print("Original sorted table:", sortedtable)
            print("The values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test06_copy_sorted7(self):
        """Testing the `checkCSI` parameter of Table.copy() (I)."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol")
        self.assertRaises(ValueError,
                          table2.copy, "/", 'table3',
                          sortby="rcol", checkCSI=False)
    def test06_copy_sorted8(self):
        """Testing the `checkCSI` parameter of Table.copy() (II)."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol")
        self.assertRaises(ValueError,
                          table2.copy, "/", 'table3',
                          sortby="rcol", checkCSI=True)
    def test07_isCSI_noelements(self):
        """Testing the representation of an index with no elements."""
        t2 = self.h5file.create_table('/', 't2', self.MyDescription)
        irows = t2.cols.rcol.create_csindex()
        if verbose:
            print("repr(t2)-->\n", repr(t2))
        self.assertEqual(irows, 0)
        # An empty index cannot be a CSI
        self.assertEqual(t2.colindexes['rcol'].is_csi, False)
class ReadSortedIndexTestCase(TempFileMixin, TestCase):
    """Test case for testing sorted reading in a "full" sorted column."""
    # Unlike CompletelySortedIndexTestCase, the index here is created with
    # a subclass-supplied `optlevel`, so it is not necessarily a CSI.
    nrows = 100
    nrowsinbuf = 11
    class MyDescription(tables.IsDescription):
        rcol = IntCol(pos=1)
        icol = IntCol(pos=2)
    def setUp(self):
        super(ReadSortedIndexTestCase, self).setUp()
        table = self.h5file.create_table('/', 'table', self.MyDescription)
        row = table.row
        nrows = self.nrows
        for i in range(nrows):
            row['rcol'] = i
            row['icol'] = nrows - i
            row.append()
        table.flush()
        self.table = table
        self.icol = self.table.cols.icol
        # A full index with maximum optlevel should always be completely sorted
        self.icol.create_index(optlevel=self.optlevel, kind="full",
                               _blocksizes=small_blocksizes)
    def test01_readSorted1(self):
        """Testing the Table.read_sorted() method with no arguments."""
        table = self.table
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = table.read_sorted('icol')
        if verbose:
            print("Sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        # Compare with the sorted read table because we have no
        # guarantees that read_sorted returns a completely sorted table
        self.assertTrue(allequal(sortedtable,
                                 numpy.sort(sortedtable2, order="icol")))
    def test01_readSorted2(self):
        """Testing the Table.read_sorted() method with no arguments
        (re-open)."""
        self._reopen()
        table = self.h5file.root.table
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = table.read_sorted('icol')
        if verbose:
            print("Sorted table:", sortedtable)
            print("The values from read_sorted:", sortedtable2)
        # Compare with the sorted read table because we have no
        # guarantees that read_sorted returns a completely sorted table
        self.assertTrue(allequal(sortedtable,
                                 numpy.sort(sortedtable2, order="icol")))
    def test02_copy_sorted1(self):
        """Testing the Table.copy(sortby) method."""
        table = self.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol")
        # Sort both sides before comparing, for the same reason as above
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = numpy.sort(table2[:], order='icol')
        if verbose:
            print("Original table:", table2[:])
            print("The sorted values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
    def test02_copy_sorted2(self):
        """Testing the Table.copy(sortby) method after table re-opening."""
        self._reopen(mode='a')
        table = self.h5file.root.table
        # Copy to another table
        table.nrowsinbuf = self.nrowsinbuf
        table2 = table.copy("/", 'table2', sortby="icol")
        sortedtable = numpy.sort(table[:], order='icol')
        sortedtable2 = numpy.sort(table2[:], order='icol')
        if verbose:
            print("Original table:", table2[:])
            print("The sorted values from copy:", sortedtable2)
        self.assertTrue(allequal(sortedtable, sortedtable2))
class ReadSortedIndex0(ReadSortedIndexTestCase):
    # Lowest optimization level for the "full" index
    optlevel = 0
class ReadSortedIndex3(ReadSortedIndexTestCase):
    # Mid-low optimization level
    optlevel = 3
class ReadSortedIndex6(ReadSortedIndexTestCase):
    # Mid-high optimization level
    optlevel = 6
class ReadSortedIndex9(ReadSortedIndexTestCase):
    # Maximum optimization level
    optlevel = 9
class Issue156TestBase(common.TempFileMixin, TestCase):
    """Regression base for gh-156: Table.copy(sortby=...) on a CSI column.

    Subclasses set `sort_field` to the (possibly nested) column the copy
    is sorted by.
    """

    # field name in table according to which test_copysort() sorts the table
    sort_field = None

    def setUp(self):
        super(Issue156TestBase, self).setUp()

        # create nested table (the nesting is part of the gh-156 scenario)
        class Foo(tables.IsDescription):
            frame = UInt16Col()

            class Bar(tables.IsDescription):
                code = UInt16Col()

        table = self.h5file.create_table('/', 'foo', Foo,
                                         filters=tables.Filters(3, 'zlib'),
                                         createparents=True)
        self.h5file.flush()

        # fill table with 10 random numbers.
        # FIX: numpy.random.random_integers() is deprecated (and removed in
        # recent NumPy releases); randint's upper bound is exclusive, so
        # 2**16 preserves the original inclusive [0, 2**16 - 1] range.
        for k in range(10):
            row = table.row
            row['frame'] = numpy.random.randint(0, 2**16)
            row['Bar/code'] = numpy.random.randint(0, 2**16)
            row.append()
        self.h5file.flush()

    def test_copysort(self):
        """Copy sorted by a CSI column, propagating the index (gh-156)."""
        oldNode = self.h5file.get_node('/foo')
        # create completely sorted index on a main column
        oldNode.colinstances[self.sort_field].create_csindex()
        # this fails on ade2ba123efd267fd31
        # see gh-156
        new_node = oldNode.copy(newname='foo2', overwrite=True,
                                sortby=self.sort_field, checkCSI=True,
                                propindexes=True)
        # check column is sorted
        self.assertTrue(numpy.all(
            new_node.col(self.sort_field) ==
            sorted(oldNode.col(self.sort_field))))
        # check index is available
        self.assertTrue(self.sort_field in new_node.colindexes)
        # check CSI was propagated
        self.assertTrue(new_node.colindexes[self.sort_field].is_csi)
class Issue156TestCase01(Issue156TestBase):
    """gh-156 regression: sort/copy by a top-level column."""
    # sort by field from non nested entry
    sort_field = 'frame'
class Issue156TestCase02(Issue156TestBase):
    """gh-156 regression: sort/copy by a column inside a nested description."""
    # sort by field from nested entry
    sort_field = 'Bar/code'
class Issue119Time32ColTestCase(common.TempFileMixin, TestCase):
    """TimeCol not properly indexing."""
    # time column type under test; the Time64 variant below overrides this
    col_typ = tables.Time32Col
    # arbitrary float payloads stored next to the timestamps
    values = [
        0.93240451618785880,
        0.76322375510776170,
        0.16695030056300875,
        0.91259117097807850,
        0.93977847053454630,
        0.51450406513503090,
        0.24452129962257563,
        0.85475938924825230,
        0.32512326762476930,
        0.75127635627046820,
    ]
    def setUp(self):
        super(Issue119Time32ColTestCase, self).setUp()
        class Descr(tables.IsDescription):
            when = self.col_typ(pos=1)
            value = Float32Col(pos=2)
        self.table = self.h5file.create_table('/', 'test', Descr)
        self.t = 1321031471.0  # 11/11/11 11:11:11
        # one row per value, timestamps increasing by one second
        data = [(self.t + i, item) for i, item in enumerate(self.values)]
        self.table.append(data)
        self.h5file.flush()
    def test_timecol_issue(self):
        tbl = self.table
        t = self.t
        wherestr = '(when >= %d) & (when < %d)' % (t, t + 5)
        # the same query must return identical rows with and without an index
        no_index = tbl.read_where(wherestr)
        tbl.cols.when.create_index(_verbose=False)
        with_index = tbl.read_where(wherestr)
        self.assertTrue((no_index == with_index).all())
class Issue119Time64ColTestCase(Issue119Time32ColTestCase):
    # same scenario as the 32-bit case, with 64-bit time columns
    col_typ = tables.Time64Col
class TestIndexingNans(TempFileMixin, TestCase):
    """Indexed queries on float columns containing NaNs must agree with
    the unindexed semantics (NaN never satisfies a comparison)."""
    def test_issue_282(self):
        trMap = {'index': Int64Col(), 'values': FloatCol()}
        table = self.h5file.create_table('/', 'table', trMap)
        r = table.row
        for i in range(5):
            r['index'] = i
            # exactly one NaN among the five rows
            r['values'] = numpy.nan if i == 0 else i
            r.append()
        table.flush()
        table.cols.values.create_index()
        # retrieve: the NaN row must not match `values >= 0`
        result = table.read_where('(values >= 0)')
        self.assertEqual(len(result), 4)
    def test_issue_327(self):
        table = self.h5file.create_table('/', 'table', dict(
            index=Int64Col(),
            values=FloatCol(shape=()),
            values2=FloatCol(shape=()),
        ))
        r = table.row
        for i in range(5):
            r['index'] = i
            r['values'] = numpy.nan if i == 2 or i == 3 else i
            r['values2'] = i
            r.append()
        table.flush()
        table.cols.values.create_index()
        table.cols.values2.create_index()
        results2 = table.read_where('(values2 > 0)')
        # BUG FIX: the original `assertTrue(len(results2), 4)` only tested
        # truthiness (the 4 was taken as the failure *message*), so the
        # assertion was vacuous.  assertEqual performs the intended check.
        self.assertEqual(len(results2), 4)
        results = table.read_where('(values > 0)')
        self.assertEqual(len(results), 2)
    def test_issue_327_b(self):
        table = self.h5file.create_table('/', 'table', dict(
            index=Int64Col(),
            values=FloatCol(shape=()),
            values2=FloatCol(shape=()),
        ))
        r = table.row
        for _ in range(100):
            for i in range(5):
                r['index'] = i
                r['values'] = numpy.nan if i == 2 or i == 3 else i
                r['values2'] = i
                r.append()
        table.flush()
        table.cols.values.create_index(_blocksizes=small_blocksizes)
        table.cols.values2.create_index(_blocksizes=small_blocksizes)
        results2 = table.read_where('(values2 > 0)')
        # BUG FIX: was a vacuous assertTrue(len(results2), 400)
        self.assertEqual(len(results2), 400)
        results = table.read_where('(values > 0)')
        self.assertEqual(len(results), 200)
    def test_csindex_nans(self):
        table = self.h5file.create_table('/', 'table', dict(
            index=Int64Col(),
            values=FloatCol(shape=()),
            values2=FloatCol(shape=()),
        ))
        r = table.row
        for x in range(100):
            for i in range(5):
                r['index'] = i
                r['values'] = numpy.nan if i == 2 or i == 3 else i
                r['values2'] = i
                r.append()
        table.flush()
        table.cols.values.create_csindex(_blocksizes=small_blocksizes)
        table.cols.values2.create_csindex(_blocksizes=small_blocksizes)
        results2 = table.read_where('(values2 > 0)')
        # BUG FIX: was a vacuous assertTrue(len(results2), 100*4)
        self.assertEqual(len(results2), 100*4)
        results = table.read_where('(values > 0)')
        self.assertEqual(len(results), 100*2)
def suite():
    """Assemble the test suite for this module (order preserved)."""
    theSuite = unittest.TestSuite()
    niter = 1
    # heavy = 1  # Uncomment this only for testing purposes!
    standard_cases = [
        BasicReadTestCase, ZlibReadTestCase, BloscReadTestCase,
        LZOReadTestCase, Bzip2ReadTestCase, ShuffleReadTestCase,
        Fletcher32ReadTestCase, ShuffleFletcher32ReadTestCase,
        OneHalfTestCase, UpperBoundTestCase, LowerBoundTestCase,
        AI1TestCase, AI2TestCase, AI9TestCase,
        DeepTableIndexTestCase, IndexPropsChangeTestCase,
        IndexFiltersTestCase, OldIndexTestCase,
        CompletelySortedIndexTestCase, ManyNodesTestCase,
        ReadSortedIndex0, ReadSortedIndex3, ReadSortedIndex6,
        ReadSortedIndex9, Issue156TestCase01, Issue156TestCase02,
        Issue119Time32ColTestCase, Issue119Time64ColTestCase,
        TestIndexingNans,
    ]
    heavy_cases = [
        AI4bTestCase, AI5TestCase, AI6TestCase, AI7TestCase,
        AI8TestCase, AI10TestCase, AI11TestCase, AI12TestCase,
    ]
    for _ in range(niter):
        for case in standard_cases:
            theSuite.addTest(unittest.makeSuite(case))
        if heavy:
            # These are too heavy for normal testing
            for case in heavy_cases:
                theSuite.addTest(unittest.makeSuite(case))
    return theSuite
if __name__ == '__main__':
    import sys
    # NOTE(review): common.parse_argv presumably consumes test-runner flags
    # (verbosity/heavy) before unittest sees argv — confirm in tables.tests.common
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
| bsd-3-clause |
codeworldprodigy/lab4 | lib/jinja2/jinja2/filters.py | 598 | 29836 | # -*- coding: utf-8 -*-
"""
jinja2.filters
~~~~~~~~~~~~~~
Bundled jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import math
from random import choice
from operator import itemgetter
from itertools import groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
unicode_urlencode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError
from jinja2._compat import next, imap, string_types, text_type, iteritems
_word_re = re.compile(r'\w+(?u)')
def contextfilter(f):
    """Mark *f* as a context dependent filter.

    The active :class:`Context` will be passed to the filter as its
    first argument.
    """
    setattr(f, 'contextfilter', True)
    return f
def evalcontextfilter(f):
    """Mark *f* as an eval-context dependent filter.

    An eval context object is passed as the filter's first argument.
    For more information about the eval context, see :ref:`eval-context`.
    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfilter', True)
    return f
def environmentfilter(f):
    """Mark *f* as an environment dependent filter.

    The current :class:`Environment` is passed to the filter as its
    first argument.
    """
    setattr(f, 'environmentfilter', True)
    return f
def make_attrgetter(environment, attribute):
    """Return a callable that fetches *attribute* from an object using
    the environment's subscription rules.  Dotted paths drill into
    nested attributes; purely numeric path segments are looked up as
    integers.
    """
    if not isinstance(attribute, string_types) \
       or ('.' not in attribute and not attribute.isdigit()):
        return lambda x: environment.getitem(x, attribute)
    path = attribute.split('.')
    def attrgetter(item):
        for segment in path:
            key = int(segment) if segment.isdigit() else segment
            item = environment.getitem(item, key)
        return item
    return attrgetter
def do_forceescape(value):
    """Enforce HTML escaping. This will probably double escape variables."""
    if hasattr(value, '__html__'):
        # unwrap Markup-like objects so their markup gets escaped again
        value = value.__html__()
    return escape(text_type(value))
def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
    dictionaries and regular strings as well as pairwise iterables.
    .. versionadded:: 2.7
    """
    itemiter = None
    if isinstance(value, dict):
        itemiter = iteritems(value)
    elif not isinstance(value, string_types):
        # non-string value: try to treat it as an iterable of (key, value) pairs
        try:
            itemiter = iter(value)
        except TypeError:
            pass
    if itemiter is None:
        # plain string (or non-iterable): quote it as a single value
        return unicode_urlencode(value)
    return u'&'.join(unicode_urlencode(k) + '=' +
                     unicode_urlencode(v) for k, v in itemiter)
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
    """Return a copy of the value with all occurrences of a substring
    replaced with a new one. The first argument is the substring
    that should be replaced, the second is the replacement string.
    If the optional third argument ``count`` is given, only the first
    ``count`` occurrences are replaced:
    .. sourcecode:: jinja
        {{ "Hello World"|replace("Hello", "Goodbye") }}
            -> Goodbye World
        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
            -> d'oh, d'oh, aaargh
    """
    if count is None:
        count = -1
    if not eval_ctx.autoescape:
        return text_type(s).replace(text_type(old), text_type(new), count)
    # BUG FIX: the original condition read `A or B and C`, which Python
    # binds as `A or (B and C)`.  The intent (and the later upstream fix)
    # is: escape `s` whenever either operand is markup but `s` itself
    # is not.
    if (hasattr(old, '__html__') or hasattr(new, '__html__')) and \
            not hasattr(s, '__html__'):
        s = escape(s)
    else:
        s = soft_unicode(s)
    return s.replace(soft_unicode(old), soft_unicode(new), count)
def do_upper(s):
    """Convert a value to uppercase."""
    # soft_unicode coerces to text first (presumably preserving Markup)
    return soft_unicode(s).upper()
def do_lower(s):
    """Convert a value to lowercase."""
    # soft_unicode coerces to text first (presumably preserving Markup)
    return soft_unicode(s).lower()
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
    """Create an SGML/XML attribute string based on the items in a dict.
    All values that are neither `none` nor `undefined` are automatically
    escaped:
    .. sourcecode:: html+jinja
        <ul{{ {'class': 'my_list', 'missing': none,
               'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>
    Results in something like this:
    .. sourcecode:: html
        <ul class="my_list" id="list-42">
        ...
        </ul>
    As you can see it automatically prepends a space in front of the item
    if the filter returned something unless the second parameter is false.
    """
    # None and Undefined values are skipped entirely; everything else is escaped
    rv = u' '.join(
        u'%s="%s"' % (escape(key), escape(value))
        for key, value in iteritems(d)
        if value is not None and not isinstance(value, Undefined)
    )
    if autospace and rv:
        # leading space lets the result be dropped directly inside a tag
        rv = u' ' + rv
    if _eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
def do_capitalize(s):
    """Capitalize a value. The first character will be uppercase, all others
    lowercase.
    """
    # str.capitalize already lowercases the remainder of the string
    return soft_unicode(s).capitalize()
def do_title(s):
    """Return a titlecased version of the value. I.e. words will start with
    uppercase letters, all remaining characters are lowercase.
    """
    pieces = []
    # split keeps the separator runs (dashes/whitespace) as their own chunks
    for chunk in re.compile(r'([-\s]+)(?u)').split(s):
        if chunk:
            pieces.append(chunk[0].upper() + chunk[1:].lower())
    return ''.join(pieces)
def do_dictsort(value, case_sensitive=False, by='key'):
    """Sort a dict and yield (key, value) pairs. Because python dicts are
    unsorted you may want to use this function to order them by either
    key or value:
    .. sourcecode:: jinja
        {% for item in mydict|dictsort %}
            sort the dict by key, case insensitive
        {% for item in mydict|dictsort(true) %}
            sort the dict by key, case sensitive
        {% for item in mydict|dictsort(false, 'value') %}
            sort the dict by key, case insensitive, sorted
            normally and ordered by value.
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either '
                                  '"key" or "value"')
    def sort_func(item):
        value = item[pos]
        # lowercase string keys/values so the default sort is case insensitive
        if isinstance(value, string_types) and not case_sensitive:
            value = value.lower()
        return value
    return sorted(value.items(), key=sort_func)
@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False,
attribute=None):
"""Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
It is also possible to sort by an attribute (for example to sort
by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
{% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
.. versionchanged:: 2.6
The `attribute` parameter was added.
"""
if not case_sensitive:
def sort_func(item):
if isinstance(item, string_types):
item = item.lower()
return item
else:
sort_func = None
if attribute is not None:
getter = make_attrgetter(environment, attribute)
def sort_func(item, processor=sort_func or (lambda x: x)):
return processor(getter(item))
return sorted(value, key=sort_func, reverse=reverse)
def do_default(value, default_value=u'', boolean=False):
    """If the value is undefined it will return the passed default value,
    otherwise the value of the variable:
    .. sourcecode:: jinja
        {{ my_variable|default('my_variable is not defined') }}
    This will output the value of ``my_variable`` if the variable was
    defined, otherwise ``'my_variable is not defined'``. If you want
    to use default with variables that evaluate to false you have to
    set the second parameter to `true`:
    .. sourcecode:: jinja
        {{ ''|default('the string was empty', true) }}
    """
    # boolean=True additionally replaces falsy values ('', 0, [], ...)
    if isinstance(value, Undefined) or (boolean and not value):
        return default_value
    return value
@evalcontextfilter
def do_join(eval_ctx, value, d=u'', attribute=None):
    """Return a string which is the concatenation of the strings in the
    sequence. The separator between elements is an empty string per
    default, you can define it with the optional parameter:
    .. sourcecode:: jinja
        {{ [1, 2, 3]|join('|') }}
            -> 1|2|3
        {{ [1, 2, 3]|join }}
            -> 123
    It is also possible to join certain attributes of an object:
    .. sourcecode:: jinja
        {{ users|join(', ', attribute='username') }}
    .. versionadded:: 2.6
       The `attribute` parameter was added.
    """
    if attribute is not None:
        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
    # no automatic escaping? joining is a lot easier then
    if not eval_ctx.autoescape:
        return text_type(d).join(imap(text_type, value))
    # if the delimiter doesn't have an html representation we check
    # if any of the items has. If yes we do a coercion to Markup
    if not hasattr(d, '__html__'):
        value = list(value)
        do_escape = False
        for idx, item in enumerate(value):
            if hasattr(item, '__html__'):
                do_escape = True
            else:
                value[idx] = text_type(item)
        if do_escape:
            d = escape(d)
        else:
            d = text_type(d)
        return d.join(value)
    # no html involved, do normal joining
    return soft_unicode(d).join(imap(soft_unicode, value))
def do_center(value, width=80):
    """Centers the value in a field of a given width."""
    # delegate to str.center after coercing the value to text
    return text_type(value).center(width)
@environmentfilter
def do_first(environment, seq):
"""Return the first item of a sequence."""
try:
return next(iter(seq))
except StopIteration:
return environment.undefined('No first item, sequence was empty.')
@environmentfilter
def do_last(environment, seq):
"""Return the last item of a sequence."""
try:
return next(iter(reversed(seq)))
except StopIteration:
return environment.undefined('No last item, sequence was empty.')
@environmentfilter
def do_random(environment, seq):
"""Return a random item from the sequence."""
try:
return choice(seq)
except IndexError:
return environment.undefined('No random item, sequence was empty.')
def do_filesizeformat(value, binary=False):
    """Format the value like a 'human-readable' file size (i.e. 13 kB,
    4.1 MB, 102 Bytes, etc).  Decimal prefixes are used by default (Mega,
    Giga, ...); set *binary* to True for binary prefixes (Mebi, Gibi).
    """
    size = float(value)
    if binary:
        base = 1024
        prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        base = 1000
        prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    if size == 1:
        return '1 Byte'
    if size < base:
        return '%d Bytes' % size
    for i, prefix in enumerate(prefixes):
        unit = base ** (i + 2)
        if size < unit:
            return '%.1f %s' % ((base * size / unit), prefix)
    # value beyond the largest prefix: fall back to the last unit computed
    return '%.1f %s' % ((base * size / unit), prefix)
def do_pprint(value, verbose=False):
    """Pretty print a variable. Useful for debugging.
    With Jinja 1.2 onwards you can pass it a parameter. If this parameter
    is truthy the output will be more verbose (this requires `pretty`)
    """
    # pformat comes from jinja2.utils and falls back to pprint if needed
    return pformat(value, verbose=verbose)
@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False):
    """Converts URLs in plain text into clickable links.
    If you pass the filter an additional integer it will shorten the urls
    to that number. Also a third argument exists that makes the urls
    "nofollow":
    .. sourcecode:: jinja
        {{ mytext|urlize(40, true) }}
            links are shortened to 40 chars and defined with rel="nofollow"
    """
    rv = urlize(value, trim_url_limit, nofollow)
    if eval_ctx.autoescape:
        # mark the generated markup safe so autoescape doesn't mangle it
        rv = Markup(rv)
    return rv
def do_indent(s, width=4, indentfirst=False):
    """Return a copy of the string with each line indented by *width*
    spaces.  The first line is left alone unless *indentfirst* is true:
    .. sourcecode:: jinja
        {{ mytext|indent(2, true) }}
            indent by two spaces and indent the first line too.
    """
    pad = u' ' * width
    body = (u'\n' + pad).join(s.splitlines())
    return pad + body if indentfirst else body
def do_truncate(s, length=255, killwords=False, end='...'):
    """Return a truncated copy of the string.  *length* defaults to 255.
    If *killwords* is true the text is cut exactly at *length*, otherwise
    the last (partial) word is discarded.  If truncation happened, *end*
    (an ellipsis by default) is appended.
    .. sourcecode:: jinja
        {{ "foo bar"|truncate(5) }}
            -> "foo ..."
        {{ "foo bar"|truncate(5, True) }}
            -> "foo b..."
    """
    if len(s) <= length:
        return s
    if killwords:
        return s[:length] + end
    kept = []
    consumed = 0
    for word in s.split(' '):
        # +1 accounts for the separating space
        consumed += len(word) + 1
        if consumed > length:
            break
        kept.append(word)
    kept.append(end)
    return u' '.join(kept)
@environmentfilter
def do_wordwrap(environment, s, width=79, break_long_words=True,
wrapstring=None):
"""
Return a copy of the string passed to the filter wrapped after
``79`` characters. You can override this default using the first
parameter. If you set the second parameter to `false` Jinja will not
split words apart if they are longer than `width`. By default, the newlines
will be the default newlines for the environment, but this can be changed
using the wrapstring keyword argument.
.. versionadded:: 2.7
Added support for the `wrapstring` parameter.
"""
if not wrapstring:
wrapstring = environment.newline_sequence
import textwrap
return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
replace_whitespace=False,
break_long_words=break_long_words))
def do_wordcount(s):
    """Count the words in that string."""
    # _word_re is the module-level pattern matching runs of word characters
    return len(_word_re.findall(s))
def do_int(value, default=0):
    """Convert the value into an integer.  If the conversion fails,
    *default* (``0`` unless overridden) is returned instead.
    """
    # try a direct int() first; falling back through float() makes
    # strings like "42.23" yield 42 instead of the default
    for convert in (int, lambda v: int(float(v))):
        try:
            return convert(value)
        except (TypeError, ValueError):
            continue
    return default
def do_float(value, default=0.0):
    """Convert the value into a floating point number.  If the conversion
    fails, *default* (``0.0`` unless overridden) is returned instead.
    """
    try:
        result = float(value)
    except (TypeError, ValueError):
        result = default
    return result
def do_format(value, *args, **kwargs):
    """
    Apply python string formatting on an object:
    .. sourcecode:: jinja
        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    if args and kwargs:
        raise FilterArgumentError('can\'t handle positional and keyword '
                                  'arguments at the same time')
    # %-style formatting with either the positional tuple or the kwarg dict
    return soft_unicode(value) % (kwargs or args)
def do_trim(value):
    """Strip leading and trailing whitespace."""
    # soft_unicode coerces to text first (presumably preserving Markup)
    return soft_unicode(value).strip()
def do_striptags(value):
    """Strip SGML/XML tags and replace adjacent whitespace by one space.
    """
    if hasattr(value, '__html__'):
        # unwrap Markup-like objects before stripping
        value = value.__html__()
    return Markup(text_type(value)).striptags()
def do_slice(value, slices, fill_with=None):
    """Slice an iterator into *slices* lists and yield them.  Useful to
    lay items out in a fixed number of columns:
    .. sourcecode:: html+jinja
        <div class="columwrapper">
          {%- for column in items|slice(3) %}
            <ul class="column-{{ loop.index }}">
            {%- for item in column %}
              <li>{{ item }}</li>
            {%- endfor %}
            </ul>
          {%- endfor %}
        </div>
    If a second argument is given it is used to pad the shorter slices on
    the last iteration.
    """
    seq = list(value)
    total = len(seq)
    per_slice, extra = divmod(total, slices)
    offset = 0
    for n in range(slices):
        start = offset + n * per_slice
        # the first `extra` slices take one additional element each
        if n < extra:
            offset += 1
        end = offset + (n + 1) * per_slice
        chunk = seq[start:end]
        if fill_with is not None and n >= extra:
            chunk.append(fill_with)
        yield chunk
def do_batch(value, linecount, fill_with=None):
    """Batch items into lists of *linecount* elements — the inverse of
    `slice`.  If *fill_with* is given it pads the final incomplete batch:
    .. sourcecode:: html+jinja
        <table>
        {%- for row in items|batch(3, '&nbsp;') %}
          <tr>
          {%- for column in row %}
            <td>{{ column }}</td>
          {%- endfor %}
          </tr>
        {%- endfor %}
        </table>
    """
    current = []
    for element in value:
        if len(current) == linecount:
            yield current
            current = []
        current.append(element)
    if current:
        if fill_with is not None:
            # pad the trailing batch up to linecount (no-op when full)
            current.extend([fill_with] * (linecount - len(current)))
        yield current
def do_round(value, precision=0, method='common'):
    """Round the number to *precision* digits (default ``0``) using one of
    three methods:
    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down
    .. sourcecode:: jinja
        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5
    Even at 0 precision a float is returned; pipe through `int` for a
    real integer.
    """
    if method not in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if method == 'common':
        return round(value, precision)
    # ceil/floor: scale, apply the math function, then scale back
    scale = 10 ** precision
    return getattr(math, method)(value * scale) / scale
@environmentfilter
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.
    If you for example have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by genders you can do something like the following
    snippet:
    .. sourcecode:: html+jinja
        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>
    Additionally it's possible to use tuple unpacking for the grouper and
    list:
    .. sourcecode:: html+jinja
        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>
    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.
    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    expr = make_attrgetter(environment, attribute)
    # itertools.groupby only groups consecutive items, so pre-sort by the key
    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
class _GroupTuple(tuple):
__slots__ = ()
grouper = property(itemgetter(0))
list = property(itemgetter(1))
def __new__(cls, xxx_todo_changeme):
(key, value) = xxx_todo_changeme
return tuple.__new__(cls, (key, list(value)))
@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The `attribute` parameter was added to allow suming up over
attributes. Also the `start` parameter was moved on to the right.
"""
if attribute is not None:
iterable = imap(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start)
def do_list(value):
    """Convert the value into a list. If it was a string the returned list
    will be a list of characters.
    """
    # plain list() conversion; any iterable is accepted
    return list(value)
def do_mark_safe(value):
    """Mark the value as safe which means that in an environment with automatic
    escaping enabled this variable will not be escaped.
    """
    # wrapping in Markup exempts the value from autoescaping
    return Markup(value)
def do_mark_unsafe(value):
    """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
    # coercing to plain text drops the Markup subclass, re-enabling escaping
    return text_type(value)
def do_reverse(value):
    """Reverse the object or return an iterator the iterates over it the other
    way round.
    """
    if isinstance(value, string_types):
        return value[::-1]
    try:
        return reversed(value)
    except TypeError:
        # not a sequence: materialize the iterable and reverse the copy
        try:
            rv = list(value)
            rv.reverse()
            return rv
        except TypeError:
            raise FilterArgumentError('argument must be iterable')
@environmentfilter
def do_attr(environment, obj, name):
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo["bar"]`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed and not \
environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@contextfilter
def do_map(*args, **kwargs):
    """Applies a filter on a sequence of objects or looks up an attribute.
    This is useful when dealing with lists of objects but you are really
    only interested in a certain value of it.
    The basic usage is mapping on an attribute. Imagine you have a list
    of users but you are only interested in a list of usernames:
    .. sourcecode:: jinja
        Users on this page: {{ users|map(attribute='username')|join(', ') }}
    Alternatively you can let it invoke a filter by passing the name of the
    filter and the arguments afterwards. A good example would be applying a
    text conversion filter on a sequence:
    .. sourcecode:: jinja
        Users on this page: {{ titles|map('lower')|join(', ') }}
    .. versionadded:: 2.7
    """
    # positional layout: (context, seq[, filter_name, *filter_args])
    context = args[0]
    seq = args[1]
    if len(args) == 2 and 'attribute' in kwargs:
        # attribute mode: map each item to one of its attributes
        attribute = kwargs.pop('attribute')
        if kwargs:
            raise FilterArgumentError('Unexpected keyword argument %r' %
                                      next(iter(kwargs)))
        func = make_attrgetter(context.environment, attribute)
    else:
        # filter mode: apply a named filter to each item
        try:
            name = args[2]
            args = args[3:]
        except LookupError:
            raise FilterArgumentError('map requires a filter argument')
        func = lambda item: context.environment.call_filter(
            name, item, args, kwargs, context=context)
    if seq:
        for item in seq:
            yield func(item)
@contextfilter
def do_select(*args, **kwargs):
    """Filters a sequence of objects by applying a test to the object
    and only selecting the ones with the test succeeding.
    Example usage:
    .. sourcecode:: jinja
        {{ numbers|select("odd") }}
    .. versionadded:: 2.7
    """
    # keep items for which the (optional) test is truthy
    return _select_or_reject(args, kwargs, lambda x: x, False)
@contextfilter
def do_reject(*args, **kwargs):
    """Filters a sequence of objects by applying a test to the object
    and rejecting the ones with the test succeeding.
    Example usage:
    .. sourcecode:: jinja
        {{ numbers|reject("odd") }}
    .. versionadded:: 2.7
    """
    # drop items for which the (optional) test is truthy
    return _select_or_reject(args, kwargs, lambda x: not x, False)
@contextfilter
def do_selectattr(*args, **kwargs):
    """Filters a sequence of objects by applying a test to an attribute
    and only selecting the ones with the test succeeding.
    Example usage:
    .. sourcecode:: jinja
        {{ users|selectattr("is_active") }}
        {{ users|selectattr("email", "none") }}
    .. versionadded:: 2.7
    """
    # like select, but the test runs on a named attribute of each item
    return _select_or_reject(args, kwargs, lambda x: x, True)
@contextfilter
def do_rejectattr(*args, **kwargs):
    """Filters a sequence of objects by applying a test to an attribute
    and rejecting the ones with the test succeeding.
    .. sourcecode:: jinja
        {{ users|rejectattr("is_active") }}
        {{ users|rejectattr("email", "none") }}
    .. versionadded:: 2.7
    """
    # like reject, but the test runs on a named attribute of each item
    return _select_or_reject(args, kwargs, lambda x: not x, True)
def _select_or_reject(args, kwargs, modfunc, lookup_attr):
    """Shared driver for select/reject/selectattr/rejectattr.

    *args* layout: ``(context, seq[, attr][, test_name, *test_args])``;
    *modfunc* negates (or not) the test result, *lookup_attr* selects the
    attribute-based variants.
    """
    context = args[0]
    seq = args[1]
    if lookup_attr:
        try:
            attr = args[2]
        except LookupError:
            raise FilterArgumentError('Missing parameter for attribute name')
        transfunc = make_attrgetter(context.environment, attr)
        # the attribute name consumed one positional slot
        off = 1
    else:
        off = 0
        transfunc = lambda x: x
    try:
        name = args[2 + off]
        args = args[3 + off:]
        func = lambda item: context.environment.call_test(
            name, item, args, kwargs)
    except LookupError:
        # no test name given: fall back to plain truthiness
        func = bool
    if seq:
        for item in seq:
            if modfunc(func(transfunc(item))):
                yield item
#: Mapping of filter names to implementations, consumed by the environment.
#: Each name appears exactly once; the original literal listed 'title' and
#: 'capitalize' twice (duplicate dict keys are dead code — the last
#: occurrence silently wins), so the duplicates were removed.
FILTERS = {
    'abs': abs,
    'attr': do_attr,
    'batch': do_batch,
    'capitalize': do_capitalize,
    'center': do_center,
    'count': len,
    'd': do_default,
    'default': do_default,
    'dictsort': do_dictsort,
    'e': escape,
    'escape': escape,
    'filesizeformat': do_filesizeformat,
    'first': do_first,
    'float': do_float,
    'forceescape': do_forceescape,
    'format': do_format,
    'groupby': do_groupby,
    'indent': do_indent,
    'int': do_int,
    'join': do_join,
    'last': do_last,
    'length': len,
    'list': do_list,
    'lower': do_lower,
    'map': do_map,
    'pprint': do_pprint,
    'random': do_random,
    'reject': do_reject,
    'rejectattr': do_rejectattr,
    'replace': do_replace,
    'reverse': do_reverse,
    'round': do_round,
    'safe': do_mark_safe,
    'select': do_select,
    'selectattr': do_selectattr,
    'slice': do_slice,
    'sort': do_sort,
    'string': soft_unicode,
    'striptags': do_striptags,
    'sum': do_sum,
    'title': do_title,
    'trim': do_trim,
    'truncate': do_truncate,
    'upper': do_upper,
    'urlencode': do_urlencode,
    'urlize': do_urlize,
    'wordcount': do_wordcount,
    'wordwrap': do_wordwrap,
    'xmlattr': do_xmlattr,
}
| apache-2.0 |
intel-analytics/analytics-zoo | pyzoo/zoo/orca/learn/horovod/horovod_ray_runner.py | 1 | 5855 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
import os
class HorovodWorker:
    """Ray actor mixin hosting one Horovod (gloo) worker process."""
    def ip_addr(self):
        # Resolve the IP address of the ray node this actor landed on.
        import ray
        return ray._private.services.get_node_ip_address()
    def set_gloo_iface(self):
        """Find the NIC whose IPv4 address matches this node's IP and
        export it via HOROVOD_GLOO_IFACE so gloo binds to the right
        interface.  Returns the interface name."""
        node_ip = self.ip_addr()
        import psutil
        import socket
        iface_name = None
        for nic, nic_addresses in psutil.net_if_addrs().items():
            for addr in nic_addresses:
                if addr.family == socket.AF_INET and addr.address == node_ip:
                    iface_name = nic
        assert iface_name is not None, "Cannot find network interface with ip {}".format(node_ip)
        os.environ["HOROVOD_GLOO_IFACE"] = iface_name
        return iface_name
    def run(self, env, func):
        """Merge *env* into os.environ, then execute *func* and return its result."""
        import os
        os.environ.update(env)
        return func()
def _hosts_to_hosts_spec(hosts):
host_to_size = {}
host_and_rank_to_worker_idx = {}
for i, host in enumerate(hosts):
if host not in host_to_size:
host_to_size[host] = 0
else:
host_to_size[host] = host_to_size[host] + 1
host_and_rank_to_worker_idx[(host, host_to_size[host])] = i
for key in host_to_size:
host_to_size[key] += 1
hosts_spec = ["{}:{}".format(key, host_to_size[key]) for key in host_to_size]
return hosts_spec, host_and_rank_to_worker_idx, host_to_size
def make_worker(worker_cls, HorovodWorker):
    """Return a class guaranteed to be a ``HorovodWorker`` subclass.

    ``worker_cls`` may be None (plain ``HorovodWorker`` is used), already a
    ``HorovodWorker`` subclass (returned unchanged), or an unrelated user
    class, in which case a combined class is built on the fly.
    """
    if worker_cls is None:
        return HorovodWorker
    if issubclass(worker_cls, HorovodWorker):
        return worker_cls
    # Mix the user's class with HorovodWorker; the user class comes first in
    # the MRO, equivalent to ``class Worker(worker_cls, HorovodWorker)``.
    return type("Worker", (worker_cls, HorovodWorker), {})
def parse_version_triplet(version_str):
    """Best-effort parse of a version string into a (major, minor, patch) int tuple.

    Tolerates non-numeric suffixes such as "0.19.0rc1" and extra fields such
    as "0.28.1.post0" by taking the leading digits of each of the first three
    dot-separated fields; missing or non-numeric fields default to 0.
    """
    import re
    numbers = []
    for field in version_str.split(".")[:3]:
        match = re.match(r"\d+", field)
        numbers.append(int(match.group()) if match else 0)
    while len(numbers) < 3:
        numbers.append(0)
    return tuple(numbers)


def get_horovod_version():
    """Return (major, minor, patch, raw_version_string) of the installed horovod.

    Fix: the previous ``major, minor, patch = horovod.__version__.split(".")``
    raised ValueError for versions with more than three fields (e.g.
    "0.28.1.post0") and for suffixed fields (e.g. "0.19.0rc1", where
    ``int("0rc1")`` fails); parsing is now delegated to
    ``parse_version_triplet`` which handles those cases.
    """
    import horovod
    version_str = horovod.__version__
    major, minor, patch = parse_version_triplet(version_str)
    return major, minor, patch, version_str
class HorovodRayRunner:
    """Bootstraps a gloo-based Horovod job on top of Ray.

    Creates one remote worker actor per (node x workers_per_node) slot,
    starts a Horovod gloo rendezvous server in the driver process, and
    computes the per-worker HOROVOD_* environment variables so that an
    arbitrary function can later be executed across all workers via
    :meth:`run`.
    """
    # todo check whether horovod is built with gloo
    def __init__(self, ray_ctx, worker_cls=None, worker_param=None, workers_per_node=1):
        """Create the worker actors and the rendezvous server.

        :param ray_ctx: context object exposing ``ray_node_cpu_cores`` and
            ``num_ray_nodes`` (presumably zoo's RayContext -- defined elsewhere).
        :param worker_cls: optional user class mixed into HorovodWorker.
        :param worker_param: kwargs forwarded to each worker's constructor.
        :param workers_per_node: number of Horovod workers per Ray node.
        """
        # Split each node's CPU cores evenly among its workers.
        self.cores_per_node = ray_ctx.ray_node_cpu_cores // workers_per_node
        self.num_nodes = ray_ctx.num_ray_nodes * workers_per_node
        if worker_param is None:
            worker_param = {}
        worker_cls = make_worker(worker_cls, HorovodWorker)
        self.worker_class = ray.remote(num_cpus=self.cores_per_node)(worker_cls)
        self.remote_workers = [self.worker_class.remote(**worker_param)
                               for i in range(0, self.num_nodes)]
        # Ask every actor which node it landed on; hosts is in worker order.
        hosts = ray.get([worker.ip_addr.remote() for worker in self.remote_workers])
        hosts_spec, name_rank_to_id, host_to_size = _hosts_to_hosts_spec(hosts)
        major, minor, patch, version_str = get_horovod_version()
        if major == 0 and minor < 19:
            raise RuntimeError(f"We only support horovod versions newer "
                               f"than 0.19.0, but got {version_str}")
        # Horovod moved/renamed its gloo_run API after 0.19, hence two paths.
        if major == 0 and minor == 19:
            from horovod.run.gloo_run import RendezvousServer, _allocate
            self.host_alloc_plan = _allocate(",".join(hosts_spec), self.num_nodes)
            self.global_rendezv = RendezvousServer(True)
            global_rendezv_port = self.global_rendezv.start_server(self.host_alloc_plan)
        else:
            from horovod.runner.gloo_run import RendezvousServer, parse_hosts, get_host_assignments
            self.host_alloc_plan = get_host_assignments(parse_hosts(",".join(hosts_spec)),
                                                        self.num_nodes)
            self.global_rendezv = RendezvousServer(True)
            global_rendezv_port = self.global_rendezv.start()
            self.global_rendezv.init(self.host_alloc_plan)
        # The rendezvous server runs in this (driver) process, so all workers
        # must be pointed at the driver's IP/port.
        driver_ip = ray._private.services.get_node_ip_address()
        common_envs = {
            "HOROVOD_GLOO_RENDEZVOUS_ADDR": driver_ip,
            "HOROVOD_GLOO_RENDEZVOUS_PORT": str(global_rendezv_port),
            "HOROVOD_CONTROLLER": "gloo",
            "HOROVOD_CPU_OPERATIONS": "gloo",
            "PYTHONUNBUFFERED": '1',
            "OMP_NUM_THREADS": str(self.cores_per_node)
        }
        # Forward any HOROVOD* variables set on the driver to every worker.
        for key in os.environ:
            if key.startswith("HOROVOD"):
                common_envs[key] = os.environ[key]
        # todo support other Horovod envs
        # Per-worker env: the shared base plus the rank/size assignment that
        # the allocation plan gives each (hostname, local_rank) slot.
        self.per_worker_envs = [common_envs.copy() for _ in range(self.num_nodes)]
        for alloc_info in self.host_alloc_plan:
            key = (alloc_info.hostname, alloc_info.local_rank)
            local_envs = self.per_worker_envs[name_rank_to_id[key]]
            local_envs["HOROVOD_HOSTNAME"] = str(alloc_info.hostname)
            local_envs["HOROVOD_RANK"] = str(alloc_info.rank)
            local_envs["HOROVOD_SIZE"] = str(alloc_info.size)
            local_envs["HOROVOD_LOCAL_RANK"] = str(alloc_info.local_rank)
            local_envs["HOROVOD_LOCAL_SIZE"] = str(alloc_info.local_size)
            local_envs["HOROVOD_CROSS_RANK"] = str(alloc_info.cross_rank)
            local_envs["HOROVOD_CROSS_SIZE"] = str(alloc_info.cross_size)
        ray.get([worker.set_gloo_iface.remote() for worker in self.remote_workers])
        # Warm-up run: applies each worker's env and checks all workers are up.
        self.run(lambda: print("horovod worker initialized"))
    def run(self, func):
        """Execute ``func`` on every remote worker (with its Horovod env
        applied) and return the list of results, in worker order."""
        return ray.get([self.remote_workers[i].run.remote(self.per_worker_envs[i], func)
                        for i in range(self.num_nodes)])
| apache-2.0 |
kunaldawn/vmouse-opencv-qt-hand-gesture-hci | servers/serverMouseEvent.py | 1 | 1649 | #----------------------------------------------------------------------
# VMouse - OpenCV Virtual Mouse (HCI)
# Copyright (C) 2014 Kunal Dawn <kunal.dawn@gmail.com>
# Copyright (C) 2014 Medha Devaraj <medha.devaraj@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
#!/usr/bin/python
# Simple TCP server that translates comma-separated commands received on
# localhost:8889 into X11 input events via the external ``xdotool`` tool.
# NOTE: this is Python 2 code (print statements).
from socket import *
import subprocess
# Listen address; binding to 'localhost' restricts access to local clients.
localAddr = ('localhost', 8889)
sock = socket(AF_INET, SOCK_STREAM)
# SO_REUSEADDR allows quick restarts without waiting out TIME_WAIT sockets.
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(localAddr)
sock.listen(5)
data = ""
while 1:
    # Serve one client at a time; accept() blocks until a connection arrives.
    (client, address) = sock.accept()
    print 'CONNECTED TO : ', address
    while 1:
        data = client.recv(1024)
        if not len(data):
            # Zero-length read: client disconnected; go accept the next one.
            break
        # Messages are comma-separated xdotool arguments.
        # NOTE(review): assumes each recv() yields exactly one whole message;
        # TCP does not preserve message boundaries -- confirm sender behavior.
        values = data.split(",")
        if(len(values) == 2):
            # Two fields: "<xdotool-subcommand>,<argument>".
            print values[0]
            subprocess.call(["xdotool", values[0], values[1]])
        else:
            # Otherwise treated as a double-click style message of three
            # fields. NOTE(review): indexes values[2] without checking that
            # len(values) >= 3; malformed input would raise IndexError.
            print 'double click'
            subprocess.call(["xdotool", values[0], values[2]])
            subprocess.call(["xdotool", values[1], values[2]])
| gpl-3.0 |
atruberg/django-custom | tests/admin_scripts/tests.py | 52 | 81725 | # -*- coding: utf-8 -*-
"""
A series of tests to establish that the command-line management tools work as
advertised - especially with regards to the handling of the DJANGO_SETTINGS_MODULE
and default settings.py files.
"""
from __future__ import unicode_literals
import os
import re
import shutil
import socket
import subprocess
import sys
import codecs
import django
from django import conf, get_version
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db import connection
from django.test.runner import DiscoverRunner
from django.test.utils import str_prefix
from django.utils import unittest
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils.six import StringIO
from django.test import LiveServerTestCase
# Scratch project directory for these tests, created inside the temp dir the
# Django test runner exposes via the DJANGO_TEST_TEMP_DIR environment variable.
test_dir = os.path.realpath(os.path.join(os.environ['DJANGO_TEST_TEMP_DIR'], 'test_project'))
if not os.path.exists(test_dir):
    os.mkdir(test_dir)
# Make the directory importable as the 'test_project' package.
open(os.path.join(test_dir, '__init__.py'), 'w').close()
# Custom app/project templates used by the startapp/startproject tests.
custom_templates_dir = os.path.join(os.path.dirname(__file__), 'custom_templates')
class AdminScriptTestCase(unittest.TestCase):
    """Base class for the admin-script tests.

    Provides helpers that write/remove throwaway settings modules inside
    ``test_dir`` and run ``django-admin.py`` / ``manage.py`` as a subprocess,
    returning the child's (stdout, stderr) for assertions.
    """
    def write_settings(self, filename, apps=None, is_dir=False, sdict=None):
        """Generate a settings module named *filename* inside ``test_dir``.

        ``apps`` overrides INSTALLED_APPS (default: auth, contenttypes and
        the admin_scripts test app); ``is_dir`` writes the settings as a
        package (``<filename>/__init__.py``); ``sdict`` appends extra
        ``NAME = value`` lines verbatim.
        """
        if is_dir:
            settings_dir = os.path.join(test_dir, filename)
            os.mkdir(settings_dir)
            settings_file_path = os.path.join(settings_dir, '__init__.py')
        else:
            settings_file_path = os.path.join(test_dir, filename)
        with open(settings_file_path, 'w') as settings_file:
            settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
            # Copy these values from the running test settings so that the
            # subprocess talks to the same database.
            exports = [
                'DATABASES',
                'ROOT_URLCONF',
                'SECRET_KEY',
            ]
            for s in exports:
                if hasattr(settings, s):
                    o = getattr(settings, s)
                    if not isinstance(o, dict):
                        o = "'%s'" % o
                    settings_file.write("%s = %s\n" % (s, o))
            if apps is None:
                apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']
            settings_file.write("INSTALLED_APPS = %s\n" % apps)
            if sdict:
                for k, v in sdict.items():
                    settings_file.write("%s = %s\n" % (k, v))
    def remove_settings(self, filename, is_dir=False):
        """Remove a settings module written by write_settings, including any
        compiled artifacts that could leak into later tests."""
        full_name = os.path.join(test_dir, filename)
        if is_dir:
            shutil.rmtree(full_name)
        else:
            os.remove(full_name)
        # Also try to remove the compiled file; if it exists, it could
        # mess up later tests that depend upon the .py file not existing
        try:
            if sys.platform.startswith('java'):
                # Jython produces module$py.class files
                os.remove(re.sub(r'\.py$', '$py.class', full_name))
            else:
                # CPython produces module.pyc files
                os.remove(full_name + 'c')
        except OSError:
            pass
        # Also remove a __pycache__ directory, if it exists
        cache_name = os.path.join(test_dir, '__pycache__')
        if os.path.isdir(cache_name):
            shutil.rmtree(cache_name)
    def _ext_backend_paths(self):
        """
        Returns the paths for any external backend packages.
        """
        paths = []
        first_package_re = re.compile(r'(^[^\.]+)\.')
        for backend in settings.DATABASES.values():
            result = first_package_re.findall(backend['ENGINE'])
            if result and result != ['django']:
                backend_pkg = __import__(result[0])
                backend_dir = os.path.dirname(backend_pkg.__file__)
                paths.append(os.path.dirname(backend_dir))
        return paths
    def run_test(self, script, args, settings_file=None, apps=None):
        """Run *script* with *args* in a subprocess and return (stdout, stderr).

        ``settings_file`` is exported as DJANGO_SETTINGS_MODULE for the child;
        if omitted, the variable is removed so the child starts unconfigured.
        NOTE(review): ``apps`` and ``project_dir`` are unused here, and the
        environment is restored only when the previous values were truthy --
        a variable this method *sets* on a previously-clean environment will
        leak into subsequent tests; confirm whether that is intentional.
        """
        project_dir = test_dir
        base_dir = os.path.dirname(test_dir)
        # The base dir for Django's tests is one level up.
        tests_dir = os.path.dirname(os.path.dirname(__file__))
        # The base dir for Django is one level above the test dir. We don't use
        # `import django` to figure that out, so we don't pick up a Django
        # from site-packages or similar.
        django_dir = os.path.dirname(tests_dir)
        ext_backend_base_dirs = self._ext_backend_paths()
        # Remember the old environment
        old_django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE', None)
        if sys.platform.startswith('java'):
            python_path_var_name = 'JYTHONPATH'
        else:
            python_path_var_name = 'PYTHONPATH'
        old_python_path = os.environ.get(python_path_var_name, None)
        old_cwd = os.getcwd()
        # Set the test environment
        if settings_file:
            os.environ['DJANGO_SETTINGS_MODULE'] = settings_file
        elif 'DJANGO_SETTINGS_MODULE' in os.environ:
            del os.environ['DJANGO_SETTINGS_MODULE']
        python_path = [base_dir, django_dir, tests_dir]
        python_path.extend(ext_backend_base_dirs)
        os.environ[python_path_var_name] = os.pathsep.join(python_path)
        # Move to the test directory and run
        os.chdir(test_dir)
        out, err = subprocess.Popen([sys.executable, script] + args,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                universal_newlines=True).communicate()
        # Restore the old environment
        if old_django_settings_module:
            os.environ['DJANGO_SETTINGS_MODULE'] = old_django_settings_module
        if old_python_path:
            os.environ[python_path_var_name] = old_python_path
        # Move back to the old working directory
        os.chdir(old_cwd)
        return out, err
    def run_django_admin(self, args, settings_file=None):
        """Run the installed django-admin.py script with *args*."""
        script_dir = os.path.abspath(os.path.join(os.path.dirname(upath(django.__file__)), 'bin'))
        return self.run_test(os.path.join(script_dir, 'django-admin.py'), args, settings_file)
    def run_manage(self, args, settings_file=None):
        """Render the project-template manage.py into test_dir (substituting
        the project name), run it with *args*, and clean it up afterwards."""
        def safe_remove(path):
            try:
                os.remove(path)
            except OSError:
                pass
        conf_dir = os.path.dirname(upath(conf.__file__))
        template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')
        test_manage_py = os.path.join(test_dir, 'manage.py')
        shutil.copyfile(template_manage_py, test_manage_py)
        with open(test_manage_py, 'r') as fp:
            manage_py_contents = fp.read()
        manage_py_contents = manage_py_contents.replace(
            "{{ project_name }}", "test_project")
        with open(test_manage_py, 'w') as fp:
            fp.write(manage_py_contents)
        self.addCleanup(safe_remove, test_manage_py)
        return self.run_test('./manage.py', args, settings_file)
    def assertNoOutput(self, stream):
        "Utility assertion: assert that the given stream is empty"
        self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)
    def assertOutput(self, stream, msg):
        "Utility assertion: assert that the given message exists in the output"
        stream = force_text(stream)
        self.assertTrue(msg in stream, "'%s' does not match actual output text '%s'" % (msg, stream))
    def assertNotInOutput(self, stream, msg):
        "Utility assertion: assert that the given message doesn't exist in the output"
        stream = force_text(stream)
        self.assertFalse(msg in stream, "'%s' matches actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
    "A series of tests for django-admin.py when there is no settings.py file."
    # No setUp/tearDown: these tests deliberately run without any settings
    # module on disk, so every invocation must fail to configure itself.
    def test_builtin_command(self):
        "no settings: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_bad_settings(self):
        "no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
class DjangoAdminDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application.
    """
    # django-admin never reads the CWD's settings.py implicitly, so commands
    # succeed only when settings are passed via --settings or the environment.
    def setUp(self):
        self.write_settings('settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "default: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_settings(self):
        "default: django-admin builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "default: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        "default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "default: django-admin can't execute user commands if it isn't provided settings"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "default: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "default: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application specified using a full path.
    """
    # NOTE(review): the apps list passed below is identical to write_settings'
    # default ('admin_scripts' is not a full dotted path here) -- confirm the
    # intent behind this variant versus DjangoAdminDefaultSettings.
    def setUp(self):
        self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'])
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "fulldefault: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_settings(self):
        "fulldefault: django-admin builtin commands succeed if a settings file is provided"
        args = ['sqlall', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "fulldefault: django-admin builtin commands succeed if the environment contains settings"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        "fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "fulldefault: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "fulldefault: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "fulldefault: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    doesn't contain the test application.
    """
    # Since 'admin_scripts' is absent from INSTALLED_APPS, built-in commands
    # targeting it and its custom management commands must both fail.
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "minimal: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_settings(self):
        "minimal: django-admin builtin commands fail if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')
    def test_builtin_with_environment(self):
        "minimal: django-admin builtin commands fail if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')
    def test_builtin_with_bad_settings(self):
        "minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "minimal: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "minimal: django-admin can't execute user commands, even if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_environment(self):
        "minimal: django-admin can't execute user commands, even if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings file
    with a name other than 'settings.py'.
    """
    # The non-default module name must be spelled out explicitly via
    # --settings or DJANGO_SETTINGS_MODULE for anything to work.
    def setUp(self):
        self.write_settings('alternate_settings.py')
    def tearDown(self):
        self.remove_settings('alternate_settings.py')
    def test_builtin_command(self):
        "alternate: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_settings(self):
        "alternate: django-admin builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.alternate_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "alternate: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "alternate: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "alternate: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.alternate_settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "alternate: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when multiple settings files
    (including the default 'settings.py') are available. The default settings
    file is insufficient for performing the operations described, so the
    alternate settings must be used by the running script.
    """
    # NOTE(review): the method docstrings below carry the "alternate:" label
    # inherited from DjangoAdminAlternateSettings -- confirm whether they
    # should read "multiple:".
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')
    def test_builtin_command(self):
        "alternate: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_settings(self):
        "alternate: django-admin builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.alternate_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "alternate: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        # Fix: assert stdout is empty, consistent with every sibling test
        # class (this check was missing here).
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "alternate: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "alternate: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.alternate_settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "alternate: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
    """
    A series of tests for django-admin.py when the settings file is in a
    directory. (see #9751).
    """
    def setUp(self):
        self.write_settings('settings', is_dir=True)
    def tearDown(self):
        self.remove_settings('settings', is_dir=True)
    def test_setup_environ(self):
        "directory: startapp creates the correct directory"
        args = ['startapp', 'settings_test']
        app_path = os.path.join(test_dir, 'settings_test')
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))
    def test_setup_environ_custom_template(self):
        "directory: startapp creates the correct directory with a custom template"
        template_path = os.path.join(custom_templates_dir, 'app_template')
        args = ['startapp', '--template', template_path, 'custom_settings_test']
        app_path = os.path.join(test_dir, 'custom_settings_test')
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))
        self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py')))
    def test_builtin_command(self):
        "directory: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')
    def test_builtin_with_bad_settings(self):
        "directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        # Fix: assert stdout is empty, consistent with every sibling test
        # class (this check was missing here).
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "directory: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_builtin_with_settings(self):
        "directory: django-admin builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "directory: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
class ManageNoSettings(AdminScriptTestCase):
    "A series of tests for manage.py when there is no settings.py file."
    # Unlike django-admin, manage.py defaults DJANGO_SETTINGS_MODULE to
    # 'test_project.settings', so the failure mode is an import error for
    # that module rather than "settings are not configured".
    def test_builtin_command(self):
        "no settings: manage.py builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'test_project.settings'")
    def test_builtin_with_bad_settings(self):
        "no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
class ManageDefaultSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    contains the test application.
    """
    def setUp(self):
        # Each test runs against a freshly generated default settings module.
        self.write_settings('settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "default: manage.py builtin commands succeed when default settings are appropriate"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_settings(self):
        "default: manage.py builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "default: manage.py builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        # The docstring previously claimed the command "succeed"s; the
        # assertions below expect an import failure.
        "default: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "default: manage.py can execute user commands when default settings are appropriate"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_settings(self):
        "default: manage.py can execute user commands when settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "default: manage.py can execute user commands when settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    contains the test application specified using a full path.
    """
    def setUp(self):
        # Apps are passed positionally here (second argument of write_settings).
        self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'])
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "fulldefault: manage.py builtin commands succeed when default settings are appropriate"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_settings(self):
        "fulldefault: manage.py builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        # The docstring previously claimed the command "succeed"s; the
        # assertions below expect an import failure.
        "fulldefault: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "fulldefault: manage.py can execute user commands when default settings are appropriate"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_settings(self):
        "fulldefault: manage.py can execute user commands when settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "fulldefault: manage.py can execute user commands when settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageMinimalSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    doesn't contain the test application.
    """
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "minimal: manage.py builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'App with label admin_scripts could not be found')
    def test_builtin_with_settings(self):
        "minimal: manage.py builtin commands fail if settings are provided as argument"
        stdout, stderr = self.run_manage(['sqlall', '--settings=test_project.settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'App with label admin_scripts could not be found')
    def test_builtin_with_environment(self):
        "minimal: manage.py builtin commands fail if settings are provided in the environment"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'test_project.settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'App with label admin_scripts could not be found')
    def test_builtin_with_bad_settings(self):
        "minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "minimal: manage.py can't execute user commands without appropriate settings"
        stdout, stderr = self.run_manage(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "minimal: manage.py can't execute user commands, even if settings are provided as argument"
        stdout, stderr = self.run_manage(['noargs_command', '--settings=test_project.settings'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")
    def test_custom_command_with_environment(self):
        "minimal: manage.py can't execute user commands, even if settings are provided in environment"
        stdout, stderr = self.run_manage(['noargs_command'], 'test_project.settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings file
    with a name other than 'settings.py'.
    """
    def setUp(self):
        self.write_settings('alternate_settings.py')
    def tearDown(self):
        self.remove_settings('alternate_settings.py')
    def test_builtin_command(self):
        "alternate: manage.py builtin commands fail with an error when no default settings provided"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'test_project.settings'")
    def test_builtin_with_settings(self):
        "alternate: manage.py builtin commands work with settings provided as argument"
        stdout, stderr = self.run_manage(['sqlall', '--settings=alternate_settings', 'admin_scripts'])
        # The exact SQL casing/quoting is backend-specific; compare case-insensitively.
        table_sql = 'create table %s' % connection.ops.quote_name('admin_scripts_article')
        self.assertTrue(table_sql.lower() in stdout.lower())
        self.assertNoOutput(stderr)
    def test_builtin_with_environment(self):
        "alternate: manage.py builtin commands work if settings are provided in the environment"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'alternate_settings')
        table_sql = 'create table %s' % connection.ops.quote_name('admin_scripts_article')
        self.assertTrue(table_sql.lower() in stdout.lower())
        self.assertNoOutput(stderr)
    def test_builtin_with_bad_settings(self):
        "alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "alternate: manage.py can't execute user commands without settings"
        stdout, stderr = self.run_manage(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'test_project.settings'")
    def test_custom_command_with_settings(self):
        "alternate: manage.py can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_manage(['noargs_command', '--settings=alternate_settings'])
        self.assertOutput(stdout, str_prefix("EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', %(_)s'1')]"))
        self.assertNoOutput(stderr)
    def test_custom_command_with_environment(self):
        "alternate: manage.py can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_manage(['noargs_command'], 'alternate_settings')
        self.assertOutput(stdout, str_prefix("EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
        self.assertNoOutput(stderr)
class ManageMultipleSettings(AdminScriptTestCase):
    """A series of tests for manage.py when multiple settings files
    (including the default 'settings.py') are available. The default settings
    file is insufficient for performing the operations described, so the
    alternate settings must be used by the running script.
    """
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')
    def test_builtin_command(self):
        "multiple: manage.py builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'App with label admin_scripts could not be found.')
    def test_builtin_with_settings(self):
        "multiple: manage.py builtin commands succeed if settings are provided as argument"
        stdout, stderr = self.run_manage(['sqlall', '--settings=alternate_settings', 'admin_scripts'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, 'CREATE TABLE')
    def test_builtin_with_environment(self):
        "multiple: manage.py can execute builtin commands if settings are provided in the environment"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'alternate_settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, 'CREATE TABLE')
    def test_builtin_with_bad_settings(self):
        "multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")
    def test_builtin_with_bad_environment(self):
        "multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")
    def test_custom_command(self):
        "multiple: manage.py can't execute user commands using default settings"
        stdout, stderr = self.run_manage(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "multiple: manage.py can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_manage(['noargs_command', '--settings=alternate_settings'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE:NoArgsCommand")
    def test_custom_command_with_environment(self):
        "multiple: manage.py can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_manage(['noargs_command'], 'alternate_settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE:NoArgsCommand")
class ManageSettingsWithSettingsErrors(AdminScriptTestCase):
    """
    Tests for manage.py when using the default settings.py file containing
    runtime errors.
    """
    def tearDown(self):
        self.remove_settings('settings.py')
    def write_settings_with_import_error(self, filename):
        # Unlike write_settings(), this writes a settings module whose import
        # itself fails: the module imports a package that does not exist.
        settings_file_path = os.path.join(test_dir, filename)
        with open(settings_file_path, 'w') as settings_file:
            settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
            settings_file.write('# The next line will cause an import error:\nimport foo42bar\n')
    def test_import_error(self):
        """
        import error: manage.py builtin commands shows useful diagnostic info
        when settings with import errors is provided (#14130).
        """
        self.write_settings_with_import_error('settings.py')
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        # The error output must name both the failure kind and the module.
        self.assertOutput(err, "No module named")
        self.assertOutput(err, "foo42bar")
    def test_attribute_error(self):
        """
        manage.py builtin commands does not swallow attribute error due to bad
        settings (#18845).
        """
        self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'})
        args = ['collectstatic', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'")
    def test_key_error(self):
        """
        manage.py builtin commands does not swallow a KeyError raised while
        the settings module is being evaluated.
        """
        self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'})
        args = ['collectstatic', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "KeyError: 'blah'")
class ManageValidate(AdminScriptTestCase):
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_nonexistent_app(self):
        "manage.py validate reports an error on a non-existent app in INSTALLED_APPS"
        self.write_settings('settings.py', apps=['admin_scriptz.broken_app'], sdict={'USE_I18N': False})
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'No module named')
        self.assertOutput(stderr, 'admin_scriptz')
    def test_broken_app(self):
        "manage.py validate reports an ImportError if an app's models.py raises one on import"
        self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'ImportError')
    def test_complex_app(self):
        "manage.py validate does not raise an ImportError validating a complex app with nested calls to load_app"
        complex_apps = ['admin_scripts.complex_app', 'admin_scripts.simple_app']
        self.write_settings('settings.py', apps=complex_apps, sdict={'DEBUG': True})
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, '0 errors found')
    def test_app_with_import(self):
        "manage.py validate does not raise errors when an app imports a base class that itself has an abstract base"
        installed = [
            'admin_scripts.app_with_import',
            'django.contrib.comments',
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sites',
        ]
        self.write_settings('settings.py', apps=installed, sdict={'DEBUG': True})
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, '0 errors found')
class CustomTestRunner(DiscoverRunner):
    """No-op test runner used when exercising the `test` management command.

    It asserts that the 'liveserver' option is consumed by the command and
    never forwarded to the runner, and it skips running any actual tests.
    """
    def __init__(self, *args, **kwargs):
        # 'liveserver' must have been popped by the test command before the
        # runner is instantiated.
        assert 'liveserver' not in kwargs
        super(CustomTestRunner, self).__init__(*args, **kwargs)
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        # Intentionally do nothing: only option handling is under test.
        pass
class ManageTestCommand(AdminScriptTestCase):
    def setUp(self):
        # Deferred import: the command module is only loaded once the test
        # environment is in place.
        from django.core.management.commands.test import Command as TestCommand
        self.cmd = TestCommand()
    def test_liveserver(self):
        """
        Ensure that the --liveserver option sets the environment variable
        correctly.
        Refs #2879.
        """
        # Backup original state
        address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
        old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
        # CustomTestRunner is a no-op runner, so handle() processes options
        # without actually running a test suite.
        self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner')
        # Original state hasn't changed
        self.assertEqual('DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ, address_predefined)
        self.assertEqual(os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS'), old_address)
        self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner',
                        liveserver='blah')
        # Variable was correctly set
        self.assertEqual(os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'], 'blah')
        # Restore original state
        if address_predefined:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
        else:
            del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
class ManageRunserver(AdminScriptTestCase):
    def setUp(self):
        from django.core.management.commands.runserver import Command
        def monkey_run(*args, **options):
            return
        self.cmd = Command()
        # Stub out run() so handle() performs the address/port parsing but
        # never binds a socket or serves requests.
        self.cmd.run = monkey_run
    def assertServerSettings(self, addr, port, ipv6=None, raw_ipv6=False):
        # Helper: check the attributes handle() left on the command object.
        self.assertEqual(self.cmd.addr, addr)
        self.assertEqual(self.cmd.port, port)
        self.assertEqual(self.cmd.use_ipv6, ipv6)
        self.assertEqual(self.cmd._raw_ipv6, raw_ipv6)
    def test_runserver_addrport(self):
        "addr:port, bare port, and no argument are all parsed correctly"
        self.cmd.handle()
        self.assertServerSettings('127.0.0.1', '8000')
        self.cmd.handle(addrport="1.2.3.4:8000")
        self.assertServerSettings('1.2.3.4', '8000')
        self.cmd.handle(addrport="7000")
        self.assertServerSettings('127.0.0.1', '7000')
    @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
    def test_runner_addrport_ipv6(self):
        "IPv6 literals (bracketed when with a port) are parsed correctly"
        self.cmd.handle(addrport="", use_ipv6=True)
        self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True)
        self.cmd.handle(addrport="7000", use_ipv6=True)
        self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True)
        self.cmd.handle(addrport="[2001:0db8:1234:5678::9]:7000")
        self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True)
    def test_runner_hostname(self):
        "hostnames (with or without dots) are accepted as the address part"
        self.cmd.handle(addrport="localhost:8000")
        self.assertServerSettings('localhost', '8000')
        self.cmd.handle(addrport="test.domain.local:7000")
        self.assertServerSettings('test.domain.local', '7000')
    @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
    def test_runner_hostname_ipv6(self):
        "a hostname with use_ipv6 keeps the hostname (not raw IPv6) form"
        self.cmd.handle(addrport="test.domain.local:7000", use_ipv6=True)
        self.assertServerSettings('test.domain.local', '7000', ipv6=True)
    def test_runner_ambiguous(self):
        "hex-looking hostnames are treated as hostnames, not IPv6 addresses"
        # Only 4 characters, all of which could be in an ipv6 address
        self.cmd.handle(addrport="beef:7654")
        self.assertServerSettings('beef', '7654')
        # Uses only characters that could be in an ipv6 address
        self.cmd.handle(addrport="deadbeef:7654")
        self.assertServerSettings('deadbeef', '7654')
class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase):
    def setUp(self):
        overrides = {'ALLOWED_HOSTS': [], 'DEBUG': False}
        self.write_settings('settings.py', sdict=overrides)
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_empty_allowed_hosts_error(self):
        "runserver refuses to start with DEBUG=False and an empty ALLOWED_HOSTS"
        stdout, stderr = self.run_manage(['runserver'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.')
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
class CommandTypes(AdminScriptTestCase):
    "Tests for the various types of base command types that can be defined."
    def setUp(self):
        self.write_settings('settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_version(self):
        "version is handled as a special case"
        args = ['version']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, get_version())
    def test_version_alternative(self):
        "--version is equivalent to version"
        args1, args2 = ['version'], ['--version']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    def test_help(self):
        "help is handled as a special case"
        args = ['help']
        out, err = self.run_manage(args)
        self.assertOutput(out, "Usage: manage.py subcommand [options] [args]")
        self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
        self.assertOutput(out, '[django]')
        self.assertOutput(out, 'startapp')
        self.assertOutput(out, 'startproject')
    def test_help_commands(self):
        "help --commands shows the list of all available commands"
        args = ['help', '--commands']
        out, err = self.run_manage(args)
        self.assertNotInOutput(out, 'Usage:')
        self.assertNotInOutput(out, 'Options:')
        self.assertNotInOutput(out, '[django]')
        self.assertOutput(out, 'startapp')
        self.assertOutput(out, 'startproject')
        self.assertNotInOutput(out, '\n\n')
    def test_help_alternative(self):
        "--help is equivalent to help"
        args1, args2 = ['help'], ['--help']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    # NOTE(review): "altert" looks like a typo for "alternative"; renaming is
    # out of scope for a documentation-only pass.
    def test_help_short_altert(self):
        "-h is handled as a short form of --help"
        args1, args2 = ['--help'], ['-h']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    def test_specific_help(self):
        "--help can be used on a specific command"
        args = ['sqlall', '--help']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "Prints the CREATE TABLE, custom SQL and CREATE INDEX SQL statements for the given model module name(s).")
    def test_base_command(self):
        "User BaseCommands can execute when a label is provided"
        args = ['base_command', 'testlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_base_command_no_label(self):
        "User BaseCommands can execute when no labels are provided"
        args = ['base_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=(), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_base_command_multiple_label(self):
        "User BaseCommands can execute when multiple labels are provided"
        args = ['base_command', 'testlabel', 'anotherlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel', 'anotherlabel'), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_base_command_with_option(self):
        "User BaseCommands can execute with options when a label is provided"
        args = ['base_command', 'testlabel', '--option_a=x']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_base_command_with_options(self):
        "User BaseCommands can execute with multiple options when a label is provided"
        args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_base_run_from_argv(self):
        """
        Test run_from_argv properly terminates even with custom execute() (#19665)
        Also test proper traceback display.
        """
        command = BaseCommand()
        def raise_command_error(*args, **kwargs):
            raise CommandError("Custom error")
        # Capture stderr so the command's error output can be inspected.
        old_stderr = sys.stderr
        sys.stderr = err = StringIO()
        try:
            command.execute = lambda args: args # This will trigger TypeError
            # If the Exception is not CommandError it should always
            # raise the original exception.
            with self.assertRaises(TypeError):
                command.run_from_argv(['', ''])
            # If the Exception is CommandError and --traceback is not present
            # this command should raise a SystemExit and don't print any
            # traceback to the stderr.
            command.execute = raise_command_error
            err.truncate(0)
            with self.assertRaises(SystemExit):
                command.run_from_argv(['', ''])
            err_message = err.getvalue()
            self.assertNotIn("Traceback", err_message)
            self.assertIn("CommandError", err_message)
            # If the Exception is CommandError and --traceback is present
            # this command should raise the original CommandError as if it
            # were not a CommandError.
            err.truncate(0)
            with self.assertRaises(CommandError):
                command.run_from_argv(['', '', '--traceback'])
        finally:
            sys.stderr = old_stderr
    def test_run_from_argv_non_ascii_error(self):
        """
        Test that non-ascii message of CommandError does not raise any
        UnicodeDecodeError in run_from_argv.
        """
        def raise_command_error(*args, **kwargs):
            raise CommandError("Erreur personnalisée")
        command = BaseCommand()
        command.execute = raise_command_error
        command.stderr = StringIO()
        with self.assertRaises(SystemExit):
            command.run_from_argv(['', ''])
    def test_noargs(self):
        "NoArg Commands can be executed"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_noargs_with_args(self):
        "NoArg Commands raise an error if an argument is provided"
        args = ['noargs_command', 'argument']
        out, err = self.run_manage(args)
        self.assertOutput(err, "Error: Command doesn't accept any arguments")
    def test_app_command(self):
        "User AppCommands can execute when a single app name is provided"
        args = ['app_command', 'auth']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
        self.assertOutput(out, "module 'django.contrib.auth.models' from")
        self.assertOutput(out, str_prefix("'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_app_command_no_apps(self):
        "User AppCommands raise an error when no app name is provided"
        args = ['app_command']
        out, err = self.run_manage(args)
        self.assertOutput(err, 'Error: Enter at least one appname.')
    def test_app_command_multiple_apps(self):
        "User AppCommands can execute when multiple app names are provided"
        args = ['app_command', 'auth', 'contenttypes']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
        self.assertOutput(out, "module 'django.contrib.auth.models' from")
        self.assertOutput(out, str_prefix("'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
        self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.contenttypes.models'")
        self.assertOutput(out, "module 'django.contrib.contenttypes.models' from")
        self.assertOutput(out, str_prefix("'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_app_command_invalid_appname(self):
        "User AppCommands raise an error when an invalid app name is provided"
        args = ['app_command', 'NOT_AN_APP']
        out, err = self.run_manage(args)
        self.assertOutput(err, "App with label NOT_AN_APP could not be found")
    def test_app_command_some_invalid_appnames(self):
        "User AppCommands raise an error when some of the provided app names are invalid"
        args = ['app_command', 'auth', 'NOT_AN_APP']
        out, err = self.run_manage(args)
        self.assertOutput(err, "App with label NOT_AN_APP could not be found")
    def test_label_command(self):
        "User LabelCommands can execute when a label is provided"
        args = ['label_command', 'testlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_label_command_no_label(self):
        "User LabelCommands raise an error if no label is provided"
        args = ['label_command']
        out, err = self.run_manage(args)
        self.assertOutput(err, 'Enter at least one label')
    def test_label_command_multiple_label(self):
        "User LabelCommands are executed multiple times if multiple labels are provided"
        args = ['label_command', 'testlabel', 'anotherlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
        self.assertOutput(out, str_prefix("EXECUTE:LabelCommand label=anotherlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', %(_)s'1')]"))
class ArgumentOrder(AdminScriptTestCase):
    """Tests for 2-stage argument parsing scheme.
    django-admin command arguments are parsed in 2 parts; the core arguments
    (--settings, --traceback and --pythonpath) are parsed using a Lax parser.
    This Lax parser ignores any unknown options. Then the full settings are
    passed to the command parser, which extracts commands of interest to the
    individual command.
    """
    def setUp(self):
        # The default settings deliberately omit 'admin_scripts', so every
        # test must rely on --settings=alternate_settings being honoured
        # regardless of where it appears on the command line.
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')
    def test_setting_then_option(self):
        "Options passed after settings are correctly handled"
        args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_setting_then_short_option(self):
        "Short options passed after settings are correctly handled"
        # Bug fix: previously this used the long form '--option_a=x', which
        # duplicated test_setting_then_option and never exercised the short
        # option ('-a x') after --settings. The expected output is unchanged.
        args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_option_then_setting(self):
        "Options passed before settings are correctly handled"
        args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_short_option_then_setting(self):
        "Short options passed before settings are correctly handled"
        args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', %(_)s'1')]"))
    def test_option_then_setting_then_option(self):
        "Options are correctly handled when they are passed before and after a setting"
        args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, str_prefix("EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', %(_)s'1')]"))
class StartProject(LiveServerTestCase, AdminScriptTestCase):
    """Tests for the ``startproject`` management command.

    LiveServerTestCase is mixed in so the *_by_url tests can fetch template
    tarballs over HTTP from the live test server.
    """
    available_apps = [
        'admin_scripts',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
    ]

    def test_wrong_args(self):
        "Make sure passing the wrong kinds of arguments raises a CommandError"
        out, err = self.run_django_admin(['startproject'])
        self.assertNoOutput(out)
        self.assertOutput(err, "you must provide a project name")

    def test_simple_project(self):
        "Make sure the startproject management command creates a project"
        args = ['startproject', 'testproject']
        testproject_dir = os.path.join(test_dir, 'testproject')
        # Third argument True -> shutil.rmtree(ignore_errors=True); the
        # directory may not exist if the command failed.
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        # running again..
        # A second run must refuse to overwrite the existing project.
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "already exists")

    def test_invalid_project_name(self):
        "Make sure the startproject management command validates a project name"
        # Names must be valid Python identifiers: no leading digit, no path
        # separators.
        for bad_name in ('7testproject', '../testproject'):
            args = ['startproject', bad_name]
            testproject_dir = os.path.join(test_dir, bad_name)
            self.addCleanup(shutil.rmtree, testproject_dir, True)
            out, err = self.run_django_admin(args)
            self.assertOutput(err, "Error: '%s' is not a valid project name. "
                "Please make sure the name begins with a letter or underscore." % bad_name)
            self.assertFalse(os.path.exists(testproject_dir))

    def test_simple_project_different_directory(self):
        "Make sure the startproject management command creates a project in a specific directory"
        args = ['startproject', 'testproject', 'othertestproject']
        testproject_dir = os.path.join(test_dir, 'othertestproject')
        # The destination directory must already exist when given explicitly.
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py')))
        # running again..
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "already exists")

    def test_custom_project_template(self):
        "Make sure the startproject management command is able to use a different project template"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'customtestproject']
        testproject_dir = os.path.join(test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        # 'additional_dir' only exists in the custom template, proving it was
        # used instead of the default one.
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))

    def test_template_dir_with_trailing_slash(self):
        "Ticket 17475: Template dir passed has a trailing path separator"
        template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep)
        args = ['startproject', '--template', template_path, 'customtestproject']
        testproject_dir = os.path.join(test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))

    def test_custom_project_template_from_tarball_by_path(self):
        "Make sure the startproject management command is able to use a different project template from a tarball"
        template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
        args = ['startproject', '--template', template_path, 'tarballtestproject']
        testproject_dir = os.path.join(test_dir, 'tarballtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        # 'run.py' comes from the tarball template.
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_custom_project_template_from_tarball_to_alternative_location(self):
        "Startproject can use a project template from a tarball and create it in a specified location"
        template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
        args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation']
        testproject_dir = os.path.join(test_dir, 'altlocation')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_custom_project_template_from_tarball_by_url(self):
        "Make sure the startproject management command is able to use a different project template from a tarball via a url"
        # Served by the live test server (LiveServerTestCase).
        template_url = '%s/admin_scripts/custom_templates/project_template.tgz' % self.live_server_url
        args = ['startproject', '--template', template_url, 'urltestproject']
        testproject_dir = os.path.join(test_dir, 'urltestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_project_template_tarball_url(self):
        "Startproject management command handles project template tar/zip balls from non-canonical urls"
        # Note the trailing slash: the archive type must still be detected.
        template_url = '%s/admin_scripts/custom_templates/project_template.tgz/' % self.live_server_url
        args = ['startproject', '--template', template_url, 'urltestproject']
        testproject_dir = os.path.join(test_dir, 'urltestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_file_without_extension(self):
        "Make sure the startproject management command is able to render custom files"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        # -e adds 'txt' to the rendered extensions, -n renders the named file
        # ('Procfile') even though it has no extension.
        args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile']
        testproject_dir = os.path.join(test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
        base_path = os.path.join(testproject_dir, 'additional_dir')
        for f in ('Procfile', 'additional_file.py', 'requirements.txt'):
            self.assertTrue(os.path.exists(os.path.join(base_path, f)))
            with open(os.path.join(base_path, f)) as fh:
                # Each file contains the rendered {{ project_name }} variable.
                self.assertEqual(fh.read(),
                    '# some file for customtestproject test project')

    def test_custom_project_template_context_variables(self):
        "Make sure template context variables are rendered with proper values"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'another_project', 'project_dir']
        testproject_dir = os.path.join(test_dir, 'project_dir')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        test_manage_py = os.path.join(testproject_dir, 'manage.py')
        with open(test_manage_py, 'r') as fp:
            content = force_text(fp.read())
            self.assertIn("project_name = 'another_project'", content)
            self.assertIn("project_directory = '%s'" % testproject_dir, content)

    def test_no_escaping_of_project_variables(self):
        "Make sure template context variables are not html escaped"
        # We're using a custom command so we need the alternate settings
        self.write_settings('alternate_settings.py')
        self.addCleanup(self.remove_settings, 'alternate_settings.py')
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['custom_startproject', '--template', template_path, 'another_project', 'project_dir', '--extra', '<&>', '--settings=alternate_settings']
        testproject_dir = os.path.join(test_dir, 'project_dir')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py')
        with open(test_manage_py, 'r') as fp:
            content = fp.read()
            # '<&>' must appear verbatim, not as '&lt;&amp;&gt;'.
            self.assertIn("<&>", content)

    def test_custom_project_destination_missing(self):
        """
        Make sure an exception is raised when the provided
        destination directory doesn't exist
        """
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2']
        # Deliberately NOT created: the command must complain, not create it.
        testproject_dir = os.path.join(test_dir, 'project_dir2')
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir)
        self.assertFalse(os.path.exists(testproject_dir))

    def test_custom_project_template_with_non_ascii_templates(self):
        "Ticket 18091: Make sure the startproject management command is able to render templates with non-ASCII content"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject']
        testproject_dir = os.path.join(test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt')
        # Read back as UTF-8 to verify the non-ASCII bytes survived rendering.
        with codecs.open(path, 'r', encoding='utf-8') as f:
            self.assertEqual(f.read().splitlines(False), [
                'Some non-ASCII text for testing ticket #18091:',
                'üäö €'])
class DiffSettings(AdminScriptTestCase):
    """Tests for the ``diffsettings`` management command."""

    def test_basic(self):
        """Runs without error and emits settings diff."""
        self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
        self.addCleanup(self.remove_settings, 'settings_to_diff.py')
        out, err = self.run_manage(['diffsettings', '--settings=settings_to_diff'])
        self.assertNoOutput(err)
        # '###' marks a setting that differs from Django's defaults.
        self.assertOutput(out, "FOO = 'bar' ###")

    def test_all(self):
        """The all option also shows settings with the default value."""
        self.write_settings('settings_to_diff.py', sdict={'STATIC_URL': 'None'})
        self.addCleanup(self.remove_settings, 'settings_to_diff.py')
        out, err = self.run_manage(['diffsettings', '--settings=settings_to_diff', '--all'])
        self.assertNoOutput(err)
        self.assertOutput(out, "### STATIC_URL = None")
class Dumpdata(AdminScriptTestCase):
    """Tests for the ``dumpdata`` management command."""

    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_pks_parsing(self):
        """Regression for #20509

        Test would raise an exception rather than printing an error message.
        """
        out, err = self.run_manage(['dumpdata', '--pks=1'])
        self.assertOutput(err, "You can only use --pks option with one model")
        self.assertNoOutput(out)
| bsd-3-clause |
openstack-ja/horizon | openstack_dashboard/dashboards/admin/routers/ports/tables.py | 6 | 1744 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from openstack_dashboard.dashboards.project.networks.ports \
import tables as networks_tables
from openstack_dashboard.dashboards.project.routers.ports \
import tables as routers_tables
LOG = logging.getLogger(__name__)
class PortsTable(tables.DataTable):
    """Admin-side table listing the ports (interfaces) attached to a router.

    Attribute order defines the column order of the rendered table.
    """
    # Port name, linking to the admin network-port detail view.
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:admin:networks:ports:detail")
    # Rendered through the project-level networks helper shared with the
    # tenant-facing ports table.
    fixed_ips = tables.Column(networks_tables.get_fixed_ips,
                              verbose_name=_("Fixed IPs"))
    status = tables.Column("status", verbose_name=_("Status"))
    # Owner string (e.g. router interface/gateway) formatted by the shared
    # project routers helper.
    device_owner = tables.Column(routers_tables.get_device_owner,
                                 verbose_name=_("Type"))
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"))

    def get_object_display(self, port):
        # Use the port UUID as the human-visible identifier in messages.
        return port.id

    class Meta:
        name = "interfaces"
        verbose_name = _("Interfaces")
| apache-2.0 |
lbeckman314/dotfiles | i3/kb-light.py | 1 | 1215 | #!/usr/bin/env python3
# coding: utf-8
from sys import argv
import dbus
def kb_light_set(delta):
    """Adjust the keyboard backlight by *delta* levels via UPower's D-Bus API.

    Parameters
    ----------
    delta : int
        Amount to add to the current brightness level (may be negative or 0).

    Returns
    -------
    str
        The resulting backlight level as a percentage string, e.g. ``"60%"``.
    """
    bus = dbus.SystemBus()
    kbd_backlight_proxy = bus.get_object('org.freedesktop.UPower', '/org/freedesktop/UPower/KbdBacklight')
    kbd_backlight = dbus.Interface(kbd_backlight_proxy, 'org.freedesktop.UPower.KbdBacklight')

    current = kbd_backlight.GetBrightness()
    maximum = kbd_backlight.GetMaxBrightness()

    # Clamp to [0, maximum]. Previously only the lower bound was clamped and
    # any value above `maximum` was silently dropped, so when `delta`
    # overshot the top level (e.g. step 5 on a device with few levels) the
    # brightness could never reach its maximum. The old `new >= 0` check was
    # also always true after max(0, ...).
    new = min(max(0, current + delta), maximum)
    kbd_backlight.SetBrightness(new)
    current = new

    # Return current backlight level percentage; avoid ZeroDivisionError on
    # devices reporting a maximum of 0.
    if not maximum:
        return "0%"
    return "%d%%" % int(100 * current / maximum)
if __name__ == '__main__':
    # Command-line interface: exactly one argument selects the action.
    #   +  / --up   : raise brightness by 5
    #   -  / --down : lower brightness by 5
    #   --get       : report current level without changing it
    cli_args = argv[1:]
    if len(cli_args) != 1:
        print("Script takes exactly one argument.", len(cli_args), "arguments provided.")
    else:
        action = cli_args[0]
        if action in ("--up", "+"):
            print(kb_light_set(5))
        elif action in ("--down", "-"):
            print(kb_light_set(-5))
        elif action == "--get":
            print(kb_light_set(0))
        else:
            print("Unknown argument:", action)
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/numpy/lib/twodim_base.py | 34 | 25580 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)


def _min_int(low, high):
    """Return the smallest signed int type whose range covers [low, high]."""
    for info, dtype in ((i1, int8), (i2, int16), (i4, int32)):
        if low >= info.min and high <= info.max:
            return dtype
    return int64
def fliplr(m):
    """
    Flip array in the left/right direction.

    Reverses the order of the columns of `m`; rows keep their positions.
    Equivalent to ``m[:, ::-1]``; a view is returned, so the operation is
    :math:`\\mathcal O(1)`.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.

    See Also
    --------
    flipud : Flip array in the up/down direction.

    Examples
    --------
    >>> np.fliplr(np.diag([1., 2., 3.]))
    array([[ 0.,  0.,  1.],
           [ 0.,  2.,  0.],
           [ 3.,  0.,  0.]])
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must be >= 2-d.")
    return arr[:, ::-1]
def flipud(m):
    """
    Flip array in the up/down direction.

    Reverses the order of the rows of `m`; columns keep their positions.
    Equivalent to ``m[::-1, ...]``; a view is returned, so the operation is
    :math:`\\mathcal O(1)`. Does not require the array to be two-dimensional.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.

    See Also
    --------
    fliplr : Flip array in the left/right direction.

    Examples
    --------
    >>> np.flipud([1, 2])
    array([2, 1])
    """
    arr = asanyarray(m)
    if arr.ndim < 1:
        raise ValueError("Input must be >= 1-d.")
    return arr[::-1, ...]
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output; defaults to `N` when None.
    k : int, optional
        Index of the diagonal: 0 (default) is the main diagonal, positive
        values select upper diagonals, negative values lower diagonals.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        Array of zeros except for ones on the `k`-th diagonal.

    Examples
    --------
    >>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]])
    """
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    if k >= M:
        # Requested diagonal lies entirely outside the array.
        return out
    # Starting offset into the flattened array; stepping by M+1 walks down
    # the chosen diagonal.
    start = k if k >= 0 else (-k) * M
    out[:M - k].flat[start::M + 1] = 1
    return out
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    Parameters
    ----------
    v : array_like
        If `v` is 2-D, return a copy of its `k`-th diagonal.
        If `v` is 1-D, return a 2-D array with `v` on the `k`-th diagonal.
    k : int, optional
        Diagonal in question; default 0. Use ``k > 0`` for diagonals above
        the main diagonal, ``k < 0`` for diagonals below it.

    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.

    See Also
    --------
    diagonal, diagflat, trace, triu, tril

    Examples
    --------
    >>> np.diag(np.arange(9).reshape((3, 3)))
    array([0, 4, 8])
    """
    v = asanyarray(v)
    ndim = v.ndim
    if ndim == 1:
        # Build an (n, n) array and write v along the k-th diagonal via the
        # flat view (stride n+1 walks one diagonal step per element).
        n = v.shape[0] + abs(k)
        out = zeros((n, n), v.dtype)
        start = k if k >= 0 else (-k) * n
        out[:n - k].flat[start::n + 1] = v
        return out
    if ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, flattened and set as the `k`-th diagonal of the output.
    k : int, optional
        Diagonal to set; 0 (default) is the main diagonal, positive values
        are above it, negative values below.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    Examples
    --------
    >>> np.diagflat([1, 2], 1)
    array([[0, 1, 0],
           [0, 0, 2],
           [0, 0, 0]])
    """
    # Preserve subclass wrapping (e.g. np.matrix) if the input provides it.
    wrap = getattr(v, "__array_wrap__", None)
    flat = asarray(v).ravel()
    n = flat.size + abs(k)
    res = zeros((n, n), flat.dtype)
    # Flat indices of the k-th diagonal: stride n+1, shifted by k (above the
    # main diagonal) or by -k rows (below it).
    if k >= 0:
        idx = arange(n - k)
        res.flat[idx * (n + 1) + k] = flat
    else:
        idx = arange(n + k)
        res.flat[idx * (n + 1) - k * n] = flat
    if wrap:
        return wrap(res)
    return res
def tri(N, M=None, k=0, dtype=float):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns; defaults to `N` when None.
    k : int, optional
        The sub-diagonal at and below which the array is filled. ``k = 0``
        is the main diagonal, ``k < 0`` below it, ``k > 0`` above. Default 0.
    dtype : dtype, optional
        Data type of the returned array; default float.

    Returns
    -------
    tri : ndarray of shape (N, M)
        ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.

    Examples
    --------
    >>> np.tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    """
    if M is None:
        M = N
    # Broadcast row >= (col - k) using the smallest int types able to hold
    # the index ranges, to keep the intermediate arrays compact.
    row_idx = arange(N, dtype=_min_int(0, N))
    col_idx = arange(-k, M - k, dtype=_min_int(-k, M - k))
    mask = greater_equal.outer(row_idx, col_idx)
    # astype(copy=False) avoids a copy when the requested dtype is bool.
    return mask.astype(dtype, copy=False)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. ``k = 0`` (default) is the
        main diagonal, ``k < 0`` below it, ``k > 0`` above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    arr = asanyarray(m)
    # Mask built over the trailing two axes; where() zeros everything above
    # the k-th diagonal.
    mask = tri(*arr.shape[-2:], k=k, dtype=bool)
    return where(mask, arr, zeros(1, arr.dtype))
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed. Refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array
    """
    arr = asanyarray(m)
    # tri(..., k=k-1) marks strictly-below-diagonal entries; those are the
    # ones to zero out.
    mask = tri(*arr.shape[-2:], k=k - 1, dtype=bool)
    return where(mask, zeros(1, arr.dtype), arr)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    The columns of the output matrix are powers of the input vector. When
    `increasing` is False (default), the `i`-th column is ``x**(N - i - 1)``;
    when True, the powers increase from left to right.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output; a square array (``N = len(x)``)
        when not specified.
    increasing : bool, optional
        Order of the powers of the columns.

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        Vandermonde matrix.

    Examples
    --------
    >>> np.vander([1, 2, 3, 5], 3)
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    if N is None:
        N = len(x)

    out = empty((len(x), N), dtype=promote_types(x.dtype, int))
    # Work on a left-to-right-increasing view; reverse it when the caller
    # wants decreasing powers.
    powers = out if increasing else out[:, ::-1]
    if N > 0:
        powers[:, 0] = 1
    if N > 1:
        # Fill with x and take a cumulative product along each row to get
        # x**1, x**2, ... without repeated exponentiation.
        powers[:, 1:] = x[:, None]
        multiply.accumulate(powers[:, 1:], out=powers[:, 1:], axis=1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x : array_like, shape (N,)
        x coordinates of the points to be histogrammed.
    y : array_like, shape (N,)
        y coordinates of the points to be histogrammed.
    bins : int or array_like or [int, int] or [array, array], optional
        The bin specification: a single int/array applies to both
        dimensions; a pair gives per-dimension bin counts or edges.
    range : array_like, shape(2,2), optional
        ``[[xmin, xmax], [ymin, ymax]]`` — outermost bin edges for any
        dimension whose edges are not given explicitly in `bins`.
    normed : bool, optional
        If True, return the bin density ``bin_count / sample_count /
        bin_area`` instead of counts.
    weights : array_like, shape(N,), optional
        Weights ``w_i`` for each sample ``(x_i, y_i)``.

    Returns
    -------
    H : ndarray, shape(nx, ny)
        The bi-dimensional histogram: `x` along the first dimension, `y`
        along the second (note: NOT the Cartesian plotting convention).
    xedges : ndarray, shape(nx,)
        The bin edges along the first dimension.
    yedges : ndarray, shape(ny,)
        The bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram
    """
    from numpy import histogramdd

    # Distinguish the scalar / pair forms of `bins` from a bare edge array.
    try:
        n_spec = len(bins)
    except TypeError:
        n_spec = 1
    if n_spec not in (1, 2):
        # A single array of edges: use it for both dimensions.
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]

    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    `mask_func` is a function that, for a square (n, n) array and an offset
    `k`, returns an array with zeros in certain locations (like `triu` or
    `tril`). This function returns the indices where the non-zero values
    would be located.

    Parameters
    ----------
    n : int
        The returned indices will be valid to access arrays of shape (n, n).
    mask_func : callable
        A function with a call signature like `triu`/`tril`:
        ``mask_func(x, k)`` returns an array shaped like `x`.
    k : scalar
        Passed through to `mask_func` as its offset argument.

    Returns
    -------
    indices : tuple of arrays.
        The indices where ``mask_func(np.ones((n, n)), k)`` is non-zero.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> iu = np.mask_indices(3, np.triu)
    >>> np.arange(9).reshape(3, 3)[iu]
    array([0, 1, 2, 4, 5, 8])
    """
    masked = mask_func(ones((n, n), int), k)
    return where(masked != 0)
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned
        arrays will be valid.  By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple of arrays
        The indices for the triangle.  The returned tuple contains two
        arrays, each with the indices along one dimension of the array.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> il1 = np.tril_indices(4)
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[il1]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
    >>> a[il1] = -1
    >>> a
    array([[-1,  1,  2,  3],
           [-1, -1,  6,  7],
           [-1, -1, -1, 11],
           [-1, -1, -1, -1]])
    """
    # tri() builds a boolean lower-triangular mask; where() converts it
    # into one index array per dimension (row-major order).
    lower_mask = tri(n, m, k=k, dtype=bool)
    return where(lower_mask)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of `arr`.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        The indices will be valid for arrays whose dimensions are
        the same as arr.
    k : int, optional
        Diagonal offset (see `tril` for details).

    See Also
    --------
    tril_indices, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # Only 2-d inputs have a well-defined lower triangle.
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return tril_indices(n_rows, k=k, m=n_cols)
def triu_indices(n, k=0, m=None):
    """
    Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will
        be valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension of the arrays for which the returned
        arrays will be valid.  By default `m` is taken equal to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays, shape(`n`)
        The indices for the triangle.  The returned tuple contains two
        arrays, each with the indices along one dimension of the array.
        Can be used to slice a ndarray of shape(`n`, `n`).

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> iu1 = np.triu_indices(4)
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[iu1]
    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
    >>> a[iu1] = -1
    >>> a
    array([[-1, -1, -1, -1],
           [ 4, -1, -1, -1],
           [ 8,  9, -1, -1],
           [12, 13, 14, -1]])
    """
    # The upper triangle with offset k is the complement of the lower
    # triangle with offset k - 1, so invert the tri() mask.
    strict_lower = tri(n, m, k=k - 1, dtype=bool)
    return where(~strict_lower)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        The indices will be valid for square arrays.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
        Indices for the upper-triangle of `arr`.

    See Also
    --------
    triu_indices, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # Only 2-d inputs have a well-defined upper triangle.
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return triu_indices(n_rows, k=k, m=n_cols)
| gpl-3.0 |
fajoy/nova | nova/api/openstack/compute/servers.py | 1 | 55112 | # Copyright 2010 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
import socket
import webob
from webob import exc
from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import utils
# Option controlling whether the (generated) admin password is included in
# the API response when a server is created.
server_opts = [
    cfg.BoolOpt('enable_instance_password',
                default=True,
                help='Allows use of instance password during '
                     'server creation'),
]

CONF = cfg.CONF
CONF.register_opts(server_opts)
# Options defined in other modules that this controller reads.
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')

LOG = logging.getLogger(__name__)
def make_fault(elem):
    """Attach a <fault> sub-template (code/created/message/details) to elem."""
    fault_elem = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
    for attr in ('code', 'created'):
        fault_elem.set(attr)
    message_elem = xmlutil.SubTemplateElement(fault_elem, 'message')
    message_elem.text = 'message'
    details_elem = xmlutil.SubTemplateElement(fault_elem, 'details')
    details_elem.text = 'details'
def make_server(elem, detailed=False):
    """Populate a server XML template element.

    Always exposes ``name`` and ``id``; with ``detailed`` also the full
    attribute set plus image/flavor/fault/metadata/addresses sub-nodes.
    """
    elem.set('name')
    elem.set('id')
    if detailed:
        elem.set('userId', 'user_id')
        elem.set('tenantId', 'tenant_id')
        for attribute in ('updated', 'created', 'hostId', 'accessIPv4',
                          'accessIPv6', 'status', 'progress',
                          'reservation_id'):
            elem.set(attribute)

        # Attach image node
        image_elem = xmlutil.SubTemplateElement(elem, 'image',
                                                selector='image')
        image_elem.set('id')
        xmlutil.make_links(image_elem, 'links')

        # Attach flavor node
        flavor_elem = xmlutil.SubTemplateElement(elem, 'flavor',
                                                 selector='flavor')
        flavor_elem.set('id')
        xmlutil.make_links(flavor_elem, 'links')

        # Attach fault node
        make_fault(elem)

        # Attach metadata node
        elem.append(common.MetadataTemplate())

        # Attach addresses node
        elem.append(ips.AddressesTemplate())

    xmlutil.make_links(elem, 'links')
# Default namespace map for server XML documents (v1.1 plus Atom links).
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
    """XML template builder for a single, detailed server document."""

    def construct(self):
        server_elem = xmlutil.TemplateElement('server', selector='server')
        make_server(server_elem, detailed=True)
        return xmlutil.MasterTemplate(server_elem, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
    """XML template builder for a minimal server index (name/id/links)."""

    def construct(self):
        servers_elem = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(servers_elem, 'server',
                                                 selector='servers')
        make_server(server_elem)
        # Paging links for the collection itself.
        xmlutil.make_links(servers_elem, 'servers_links')
        return xmlutil.MasterTemplate(servers_elem, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
    """XML template builder for a detailed list of servers."""

    def construct(self):
        servers_elem = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(servers_elem, 'server',
                                                 selector='servers')
        make_server(server_elem, detailed=True)
        return xmlutil.MasterTemplate(servers_elem, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
    """Slave template that adds the adminPass attribute to a server doc."""

    def construct(self):
        server_elem = xmlutil.TemplateElement('server')
        server_elem.set('adminPass')
        return xmlutil.SlaveTemplate(server_elem, 1, nsmap=server_nsmap)
def FullServerTemplate():
    """Return the server template extended with the adminPass slave."""
    template = ServerTemplate()
    template.attach(ServerAdminPassTemplate())
    return template
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
    """Common deserializer to handle xml-formatted server create requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes.

    Each ``_extract_*`` helper returns ``None`` when the corresponding XML
    node is absent, so callers can distinguish "not supplied" from
    "supplied but empty".
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def _extract_personality(self, server_node):
        """Marshal the personality attribute of a parsed request."""
        node = self.find_first_child_named(server_node, "personality")
        if node is not None:
            personality = []
            for file_node in self.find_children_named(node, "file"):
                item = {}
                if file_node.hasAttribute("path"):
                    item["path"] = file_node.getAttribute("path")
                item["contents"] = self.extract_text(file_node)
                personality.append(item)
            return personality
        else:
            return None

    def _extract_server(self, node):
        """Marshal the server attribute of a parsed request.

        Builds the same dict shape the JSON API produces so downstream
        code can treat both formats uniformly.
        """
        server = {}
        server_node = self.find_first_child_named(node, 'server')

        # Simple string attributes copied through verbatim.
        attributes = ["name", "imageRef", "flavorRef", "adminPass",
                      "accessIPv4", "accessIPv6", "key_name",
                      "availability_zone", "min_count", "max_count"]
        for attr in attributes:
            if server_node.getAttribute(attr):
                server[attr] = server_node.getAttribute(attr)

        res_id = server_node.getAttribute('return_reservation_id')
        if res_id:
            server['return_reservation_id'] = utils.bool_from_str(res_id)

        scheduler_hints = self._extract_scheduler_hints(server_node)
        if scheduler_hints:
            server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints

        metadata_node = self.find_first_child_named(server_node, "metadata")
        if metadata_node is not None:
            server["metadata"] = self.extract_metadata(metadata_node)

        user_data_node = self.find_first_child_named(server_node, "user_data")
        if user_data_node is not None:
            server["user_data"] = self.extract_text(user_data_node)

        personality = self._extract_personality(server_node)
        if personality is not None:
            server["personality"] = personality

        networks = self._extract_networks(server_node)
        if networks is not None:
            server["networks"] = networks

        security_groups = self._extract_security_groups(server_node)
        if security_groups is not None:
            server["security_groups"] = security_groups

        # NOTE(vish): this is not namespaced in json, so leave it without a
        #             namespace for now
        block_device_mapping = self._extract_block_device_mapping(server_node)
        if block_device_mapping is not None:
            server["block_device_mapping"] = block_device_mapping

        # NOTE(vish): Support this incorrect version because it was in the
        #             code base for a while and we don't want to accidentally
        #             break anyone that might be using it.
        auto_disk_config = server_node.getAttribute('auto_disk_config')
        if auto_disk_config:
            server['OS-DCF:diskConfig'] = utils.bool_from_str(auto_disk_config)

        # The namespaced spelling wins if both are present (checked second).
        auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
        if auto_disk_config:
            server['OS-DCF:diskConfig'] = utils.bool_from_str(auto_disk_config)

        config_drive = server_node.getAttribute('config_drive')
        if config_drive:
            server['config_drive'] = config_drive

        return server

    def _extract_block_device_mapping(self, server_node):
        """Marshal the block_device_mapping node of a parsed request."""
        node = self.find_first_child_named(server_node, "block_device_mapping")
        if node:
            block_device_mapping = []
            for child in self.extract_elements(node):
                if child.nodeName != "mapping":
                    continue
                mapping = {}
                # String-valued mapping attributes.
                attributes = ["volume_id", "snapshot_id", "device_name",
                              "virtual_name", "volume_size"]
                for attr in attributes:
                    value = child.getAttribute(attr)
                    if value:
                        mapping[attr] = value
                # Boolean-valued mapping attributes.
                attributes = ["delete_on_termination", "no_device"]
                for attr in attributes:
                    value = child.getAttribute(attr)
                    if value:
                        mapping[attr] = utils.bool_from_str(value)
                block_device_mapping.append(mapping)
            return block_device_mapping
        else:
            return None

    def _extract_scheduler_hints(self, server_node):
        """Marshal the scheduler hints attribute of a parsed request."""
        # Hints live in their own XML namespace; each child element name is
        # a hint key, collected into a list of its text values.
        node = self.find_first_child_named_in_namespace(server_node,
            "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
            "scheduler_hints")
        if node:
            scheduler_hints = {}
            for child in self.extract_elements(node):
                scheduler_hints.setdefault(child.nodeName, [])
                value = self.extract_text(child).strip()
                scheduler_hints[child.nodeName].append(value)
            return scheduler_hints
        else:
            return None

    def _extract_networks(self, server_node):
        """Marshal the networks attribute of a parsed request."""
        node = self.find_first_child_named(server_node, "networks")
        if node is not None:
            networks = []
            for network_node in self.find_children_named(node,
                                                         "network"):
                item = {}
                if network_node.hasAttribute("uuid"):
                    item["uuid"] = network_node.getAttribute("uuid")
                if network_node.hasAttribute("fixed_ip"):
                    item["fixed_ip"] = network_node.getAttribute("fixed_ip")
                if network_node.hasAttribute("port"):
                    item["port"] = network_node.getAttribute("port")
                networks.append(item)
            return networks
        else:
            return None

    def _extract_security_groups(self, server_node):
        """Marshal the security_groups attribute of a parsed request."""
        node = self.find_first_child_named(server_node, "security_groups")
        if node is not None:
            security_groups = []
            for sg_node in self.find_children_named(node, "security_group"):
                item = {}
                name = self.find_attribute_or_element(sg_node, 'name')
                if name:
                    item["name"] = name
                security_groups.append(item)
            return security_groups
        else:
            return None
class ActionDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted server action requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes.
    """

    def default(self, string):
        # Dispatch on the root tag name; unknown action names fall back to
        # the generic deserializer from the parent class.
        dom = minidom.parseString(string)
        action_node = dom.childNodes[0]
        action_name = action_node.tagName

        action_deserializer = {
            'createImage': self._action_create_image,
            'changePassword': self._action_change_password,
            'reboot': self._action_reboot,
            'rebuild': self._action_rebuild,
            'resize': self._action_resize,
            'confirmResize': self._action_confirm_resize,
            'revertResize': self._action_revert_resize,
        }.get(action_name, super(ActionDeserializer, self).default)

        action_data = action_deserializer(action_node)

        return {'body': {action_name: action_data}}

    def _action_create_image(self, node):
        """Parse a createImage action: image name plus optional metadata."""
        return self._deserialize_image_action(node, ('name',))

    def _action_change_password(self, node):
        # AttributeError here is translated to a 400 by the wsgi layer.
        if not node.hasAttribute("adminPass"):
            raise AttributeError("No adminPass was specified in request")
        return {"adminPass": node.getAttribute("adminPass")}

    def _action_reboot(self, node):
        if not node.hasAttribute("type"):
            raise AttributeError("No reboot type was specified in request")
        return {"type": node.getAttribute("type")}

    def _action_rebuild(self, node):
        """Parse a rebuild action; imageRef is mandatory, the rest optional."""
        rebuild = {}
        if node.hasAttribute("name"):
            name = node.getAttribute("name")
            if not name:
                raise AttributeError("Name cannot be blank")
            rebuild['name'] = name

        if node.hasAttribute("auto_disk_config"):
            rebuild['auto_disk_config'] = node.getAttribute("auto_disk_config")

        metadata_node = self.find_first_child_named(node, "metadata")
        if metadata_node is not None:
            rebuild["metadata"] = self.extract_metadata(metadata_node)

        personality = self._extract_personality(node)
        if personality is not None:
            rebuild["personality"] = personality

        if not node.hasAttribute("imageRef"):
            raise AttributeError("No imageRef was specified in request")
        rebuild["imageRef"] = node.getAttribute("imageRef")

        if node.hasAttribute("adminPass"):
            rebuild["adminPass"] = node.getAttribute("adminPass")

        if node.hasAttribute("accessIPv4"):
            rebuild["accessIPv4"] = node.getAttribute("accessIPv4")

        if node.hasAttribute("accessIPv6"):
            rebuild["accessIPv6"] = node.getAttribute("accessIPv6")

        return rebuild

    def _action_resize(self, node):
        """Parse a resize action; flavorRef is mandatory."""
        resize = {}

        if node.hasAttribute("flavorRef"):
            resize["flavorRef"] = node.getAttribute("flavorRef")
        else:
            raise AttributeError("No flavorRef was specified in request")

        if node.hasAttribute("auto_disk_config"):
            resize['auto_disk_config'] = node.getAttribute("auto_disk_config")

        return resize

    def _action_confirm_resize(self, node):
        # confirmResize carries no body payload.
        return None

    def _action_revert_resize(self, node):
        # revertResize carries no body payload.
        return None

    def _deserialize_image_action(self, node, allowed_attributes):
        """Collect whitelisted attributes plus optional metadata from node."""
        data = {}
        for attribute in allowed_attributes:
            value = node.getAttribute(attribute)
            if value:
                data[attribute] = value
        metadata_node = self.find_first_child_named(node, 'metadata')
        if metadata_node is not None:
            metadata = self.metadata_deserializer.extract_metadata(
                metadata_node)
            data['metadata'] = metadata
        return data
class CreateDeserializer(CommonDeserializer):
    """Deserializer for xml-formatted "create server" requests.

    Standard server attributes, optional metadata and personality are all
    handled by the shared CommonDeserializer machinery.
    """

    def default(self, string):
        """Deserialize an xml-formatted server create request."""
        document = minidom.parseString(string)
        server_dict = self._extract_server(document)
        return {'body': {'server': server_dict}}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = link[0]['href'].encode('utf-8')
# Convenience return
return robj
    def __init__(self, ext_mgr=None, **kwargs):
        super(Controller, self).__init__(**kwargs)
        self.compute_api = compute.API()
        # Extension manager used to gate optional request attributes.
        self.ext_mgr = ext_mgr
        # Set lazily by _is_quantum_v2(): True once the quantum import has
        # been attempted (result cached in self.have_quantum).
        self.quantum_attempted = False
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=str(err))
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=str(err))
return servers
def _add_instance_faults(self, ctxt, instances):
faults = self.compute_api.get_instance_faults(ctxt, instances)
if faults is not None:
for instance in instances:
faults_list = faults.get(instance['uuid'], [])
try:
instance['fault'] = faults_list[0]
except IndexError:
pass
return instances
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified."""

        search_opts = {}
        search_opts.update(req.GET)

        context = req.environ['nova.context']
        # Strip any filters the caller is not allowed to use.
        remove_invalid_options(context, search_opts,
                self._get_server_search_options())

        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state for compute_api.
        status = search_opts.pop('status', None)
        if status is not None:
            state = common.vm_state_from_status(status)
            if state is None:
                # Unknown status can never match: short-circuit with an
                # empty result instead of querying.
                return {'servers': []}
            search_opts['vm_state'] = state

        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed

        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted images according to the API spec.
        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False

        if search_opts.get("vm_state") == "deleted":
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPBadRequest(explanation=msg)

        # Non-admin listings are scoped to the caller's project (or user).
        if 'all_tenants' not in search_opts:
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id

        limit, marker = common.get_limit_and_marker(req)
        try:
            instance_list = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     limit=limit,
                                                     marker=marker)
        except exception.MarkerNotFound as e:
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound as e:
            msg = _("Flavor could not be found")
            raise webob.exc.HTTPUnprocessableEntity(explanation=msg)

        if is_detail:
            self._add_instance_faults(context, instance_list)
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)

        req.cache_db_instances(instance_list)
        return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _validate_server_name(self, value):
if not isinstance(value, basestring):
msg = _("Server name is not a string or unicode")
raise exc.HTTPBadRequest(explanation=msg)
if not value.strip():
msg = _("Server name is an empty string")
raise exc.HTTPBadRequest(explanation=msg)
if not len(value) < 256:
msg = _("Server name must be less than 256 characters.")
raise exc.HTTPBadRequest(explanation=msg)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
contents = self._decode_base64(contents)
if contents is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
def _is_quantum_v2(self):
# NOTE(dprince): quantumclient is not a requirement
if self.quantum_attempted:
return self.have_quantum
try:
self.quantum_attempted = True
from nova.network.quantumv2 import api as quantum_api
self.have_quantum = issubclass(
importutils.import_class(CONF.network_api_class),
quantum_api.API)
except ImportError:
self.have_quantum = False
return self.have_quantum
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute."""
        networks = []
        for network in requested_networks:
            try:
                # A 'port' entry is only meaningful with quantum v2; when
                # given, the network uuid is resolved from the port instead.
                port_id = network.get('port', None)
                if port_id:
                    network_uuid = None
                    if not self._is_quantum_v2():
                        # port parameter is only for quantum v2.0
                        msg = _("Unknown argment : port")
                        raise exc.HTTPBadRequest(explanation=msg)
                    if not uuidutils.is_uuid_like(port_id):
                        msg = _("Bad port format: port uuid is "
                                "not in proper format "
                                "(%s)") % port_id
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    network_uuid = network['uuid']

                if not port_id and not uuidutils.is_uuid_like(network_uuid):
                    # Accept a "<prefix>-<uuid>" form (e.g. bridge-prefixed
                    # ids) as long as the trailing part is a valid uuid.
                    br_uuid = network_uuid.split('-', 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = _("Bad networks format: network uuid is "
                                "not in proper format "
                                "(%s)") % network_uuid
                        raise exc.HTTPBadRequest(explanation=msg)

                # fixed IP address is optional
                # if the fixed IP address is not provided then
                # it will use one of the available IP address from the network
                address = network.get('fixed_ip', None)
                if address is not None and not utils.is_valid_ipv4(address):
                    msg = _("Invalid fixed IP address (%s)") % address
                    raise exc.HTTPBadRequest(explanation=msg)

                # For quantumv2, requested_networks
                # should be tuple of (network_uuid, fixed_ip, port_id)
                if self._is_quantum_v2():
                    networks.append((network_uuid, address, port_id))
                else:
                    # check if the network id is already present in the list,
                    # we don't want duplicate networks to be passed
                    # at the boot time
                    for id, ip in networks:
                        if id == network_uuid:
                            expl = (_("Duplicate networks"
                                      " (%s) are not allowed") %
                                    network_uuid)
                            raise exc.HTTPBadRequest(explanation=expl)
                    networks.append((network_uuid, address))
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)

        return networks
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
try:
socket.inet_aton(address)
except socket.error:
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id)
req.cache_db_instance(instance)
self._add_instance_faults(context, [instance])
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Creates a new server for a given user."""
        if not self.is_valid_body(body, 'server'):
            raise exc.HTTPUnprocessableEntity()

        context = req.environ['nova.context']
        server_dict = body['server']
        password = self._get_server_admin_password(server_dict)

        if not 'name' in server_dict:
            msg = _("Server name is not defined")
            raise exc.HTTPBadRequest(explanation=msg)

        name = server_dict['name']
        self._validate_server_name(name)
        name = name.strip()

        image_uuid = self._image_from_req_data(body)

        # Optional attributes below are only honored when the corresponding
        # API extension is loaded.
        personality = server_dict.get('personality')
        config_drive = None
        if self.ext_mgr.is_loaded('os-config-drive'):
            config_drive = server_dict.get('config_drive')

        injected_files = []
        if personality:
            injected_files = self._get_injected_files(personality)

        sg_names = []
        if self.ext_mgr.is_loaded('os-security-groups'):
            security_groups = server_dict.get('security_groups')
            if security_groups is not None:
                sg_names = [sg['name'] for sg in security_groups
                            if sg.get('name')]
        if not sg_names:
            sg_names.append('default')

        # De-duplicate requested security groups.
        sg_names = list(set(sg_names))

        requested_networks = None
        if self.ext_mgr.is_loaded('os-networks'):
            requested_networks = server_dict.get('networks')

        if requested_networks is not None:
            requested_networks = self._get_requested_networks(
                requested_networks)

        # NOTE: the trailing comma makes the right-hand side a 1-tuple; this
        # is just "access_ip_v4 = server_dict.get('accessIPv4')".
        (access_ip_v4, ) = server_dict.get('accessIPv4'),
        if access_ip_v4 is not None:
            self._validate_access_ipv4(access_ip_v4)

        (access_ip_v6, ) = server_dict.get('accessIPv6'),
        if access_ip_v6 is not None:
            self._validate_access_ipv6(access_ip_v6)

        try:
            flavor_id = self._flavor_id_from_req_data(body)
        except ValueError as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)

        # optional openstack extensions:
        key_name = None
        if self.ext_mgr.is_loaded('os-keypairs'):
            key_name = server_dict.get('key_name')

        user_data = None
        if self.ext_mgr.is_loaded('os-user-data'):
            user_data = server_dict.get('user_data')
        self._validate_user_data(user_data)

        availability_zone = None
        if self.ext_mgr.is_loaded('os-availability-zone'):
            availability_zone = server_dict.get('availability_zone')

        block_device_mapping = None
        if self.ext_mgr.is_loaded('os-volumes'):
            block_device_mapping = server_dict.get('block_device_mapping', [])
            for bdm in block_device_mapping:
                if 'delete_on_termination' in bdm:
                    bdm['delete_on_termination'] = utils.bool_from_str(
                        bdm['delete_on_termination'])

        ret_resv_id = False
        # min_count and max_count are optional.  If they exist, they may come
        # in as strings.  Verify that they are valid integers and > 0.
        # Also, we want to default 'min_count' to 1, and default
        # 'max_count' to be 'min_count'.
        min_count = 1
        max_count = 1
        if self.ext_mgr.is_loaded('os-multiple-create'):
            ret_resv_id = server_dict.get('return_reservation_id', False)
            min_count = server_dict.get('min_count', 1)
            max_count = server_dict.get('max_count', min_count)

        try:
            min_count = int(min_count)
        except ValueError:
            raise webob.exc.HTTPBadRequest(_('min_count must be an '
                                             'integer value'))
        if min_count < 1:
            raise webob.exc.HTTPBadRequest(_('min_count must be > 0'))

        try:
            max_count = int(max_count)
        except ValueError:
            raise webob.exc.HTTPBadRequest(_('max_count must be an '
                                             'integer value'))
        if max_count < 1:
            raise webob.exc.HTTPBadRequest(_('max_count must be > 0'))

        if min_count > max_count:
            raise webob.exc.HTTPBadRequest(_('min_count must be <= max_count'))

        auto_disk_config = False
        if self.ext_mgr.is_loaded('OS-DCF'):
            auto_disk_config = server_dict.get('auto_disk_config')

        scheduler_hints = {}
        if self.ext_mgr.is_loaded('OS-SCH-HNT'):
            scheduler_hints = server_dict.get('scheduler_hints', {})

        try:
            _get_inst_type = instance_types.get_instance_type_by_flavor_id
            inst_type = _get_inst_type(flavor_id, read_deleted="no")

            (instances, resv_id) = self.compute_api.create(context,
                            inst_type,
                            image_uuid,
                            display_name=name,
                            display_description=name,
                            key_name=key_name,
                            metadata=server_dict.get('metadata', {}),
                            access_ip_v4=access_ip_v4,
                            access_ip_v6=access_ip_v6,
                            injected_files=injected_files,
                            admin_password=password,
                            min_count=min_count,
                            max_count=max_count,
                            requested_networks=requested_networks,
                            security_group=sg_names,
                            user_data=user_data,
                            availability_zone=availability_zone,
                            config_drive=config_drive,
                            block_device_mapping=block_device_mapping,
                            auto_disk_config=auto_disk_config,
                            scheduler_hints=scheduler_hints)
        except exception.QuotaError as error:
            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error),
                                                headers={'Retry-After': 0})
        except exception.InstanceTypeMemoryTooSmall as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InstanceTypeNotFound as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InstanceTypeDiskTooSmall as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InvalidMetadata as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error))
        except exception.ImageNotFound as error:
            msg = _("Can not find requested image")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.KeypairNotFound as error:
            msg = _("Invalid key_name provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.SecurityGroupNotFound as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except rpc_common.RemoteError as err:
            msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
                                                 'err_msg': err.value}
            raise exc.HTTPBadRequest(explanation=msg)
        except UnicodeDecodeError as error:
            msg = "UnicodeError: %s" % unicode(error)
            raise exc.HTTPBadRequest(explanation=msg)
        # Let the caller deal with unhandled exceptions.

        # If the caller wanted a reservation_id, return it
        # NOTE(treinish): XML serialization will not work without a root
        #                 selector of 'server' however JSON return is not
        #                 expecting a server field/object
        if ret_resv_id and (req.get_content_type() == 'application/xml'):
            return {'server': {'reservation_id': resv_id}}
        elif ret_resv_id:
            return {'reservation_id': resv_id}

        req.cache_db_instances(instances)
        server = self._view_builder.create(req, instances[0])

        if '_is_precooked' in server['server'].keys():
            del server['server']['_is_precooked']
        else:
            # Only expose the admin password when the deployment allows it.
            if CONF.enable_instance_password:
                server['server']['adminPass'] = password

        robj = wsgi.ResponseObject(server)

        return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
self.compute_api.soft_delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
if access_ipv4 is None:
access_ipv4 = ''
if access_ipv4:
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = access_ipv4.strip()
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
if access_ipv6 is None:
access_ipv6 = ''
if access_ipv6:
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = access_ipv6.strip()
if 'auto_disk_config' in body['server']:
auto_disk_config = utils.bool_from_str(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
if 'hostId' in body['server']:
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if 'personality' in body['server']:
msg = _("Personality cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(ctxt, id)
req.cache_db_instance(instance)
self.compute_api.update(ctxt, instance, **update_dict)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
instance.update(update_dict)
self._add_instance_faults(ctxt, [instance])
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
except Exception, e:
LOG.exception(_("Error in confirm-resize %s"), e)
raise exc.HTTPBadRequest()
return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceTypeNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
except Exception, e:
LOG.exception(_("Error in revert-resize %s"), e)
raise exc.HTTPBadRequest()
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if not valid_reboot_types.count(reboot_type):
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
except Exception, e:
LOG.exception(_("Error in reboot %s"), e, instance=instance)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
    def _image_ref_from_req_data(self, data):
        """Return the server's imageRef from a request body as unicode.

        Raises HTTPBadRequest when the body is malformed (TypeError) or the
        attribute is absent (KeyError).
        """
        try:
            return unicode(data['server']['imageRef'])
        except (TypeError, KeyError):
            msg = _("Missing imageRef attribute")
            raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, data):
"""
Get image data from the request or raise appropriate
exceptions
If no image is supplied - checks to see if there is
block devices set and proper extesions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
if not image_ref and bdm and self.ext_mgr.is_loaded('os-volumes'):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not 'changePassword' in body
or not 'adminPass' in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = body['changePassword']['adminPass']
if not isinstance(password, basestring):
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
server = self._get_server(context, req, id)
self.compute_api.set_admin_password(context, server, password)
return webob.Response(status_int=202)
    def _validate_metadata(self, metadata):
        """Ensure that we can work with the metadata given."""
        try:
            # Duck-type probe for a dict-like object (Python 2 mapping
            # protocol); anything without iteritems() cannot supply
            # key/value pairs.
            metadata.iteritems()
        except AttributeError:
            msg = _("Unable to parse metadata key/value pairs.")
            LOG.debug(msg)
            raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
flavor_ref = body["resize"]["flavorRef"]
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('rebuild')
    def _action_rebuild(self, req, id, body):
        """Rebuild an instance with the given attributes.

        Expects a 'rebuild' entity with at least 'imageRef'; optional keys
        (adminPass, personality, name, accessIPv4/6, metadata,
        auto_disk_config) are validated and forwarded to
        compute_api.rebuild.
        """
        try:
            body = body['rebuild']
        except (KeyError, TypeError):
            raise exc.HTTPBadRequest(_("Invalid request body"))

        try:
            image_href = body["imageRef"]
        except (KeyError, TypeError):
            msg = _("Could not parse imageRef from request.")
            raise exc.HTTPBadRequest(explanation=msg)

        image_href = self._image_uuid_from_href(image_href)

        try:
            password = body['adminPass']
        except (KeyError, TypeError):
            # No password supplied: generate one for the rebuilt instance.
            password = utils.generate_password()

        context = req.environ['nova.context']
        instance = self._get_server(context, req, id)

        # API attribute name -> compute_api.rebuild keyword argument.
        attr_map = {
            'personality': 'files_to_inject',
            'name': 'display_name',
            'accessIPv4': 'access_ip_v4',
            'accessIPv6': 'access_ip_v6',
            'metadata': 'metadata',
            'auto_disk_config': 'auto_disk_config',
        }

        # Validate user-controlled fields before mapping them through.
        if 'accessIPv4' in body:
            self._validate_access_ipv4(body['accessIPv4'])

        if 'accessIPv6' in body:
            self._validate_access_ipv6(body['accessIPv6'])

        if 'name' in body:
            self._validate_server_name(body['name'])

        kwargs = {}

        for request_attribute, instance_attribute in attr_map.items():
            try:
                kwargs[instance_attribute] = body[request_attribute]
            except (KeyError, TypeError):
                # Absent attributes are simply not passed along.
                pass

        self._validate_metadata(kwargs.get('metadata', {}))

        if 'files_to_inject' in kwargs:
            personality = kwargs['files_to_inject']
            kwargs['files_to_inject'] = self._get_injected_files(personality)

        try:
            self.compute_api.rebuild(context,
                                     instance,
                                     image_href,
                                     password,
                                     **kwargs)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'rebuild')
        except exception.InstanceNotFound:
            msg = _("Instance could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidMetadata as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error))
        except exception.ImageNotFound:
            msg = _("Cannot find image for rebuild")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.InstanceTypeMemoryTooSmall as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))
        except exception.InstanceTypeDiskTooSmall as error:
            raise exc.HTTPBadRequest(explanation=unicode(error))

        # Re-fetch so the response reflects the post-rebuild state.
        instance = self._get_server(context, req, id)

        self._add_instance_faults(context, [instance])
        view = self._view_builder.show(req, instance)

        # Add on the adminPass attribute since the view doesn't do it
        # unless instance passwords are disabled
        if CONF.enable_instance_password:
            view['server']['adminPass'] = password

        robj = wsgi.ResponseObject(view)
        return self._add_location(robj)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('createImage')
    @common.check_snapshots_enabled
    def _action_create_image(self, req, id, body):
        """Snapshot a server instance.

        Requires a 'createImage' entity with a 'name'; optional 'metadata'
        becomes extra image properties.  Responds 202 with a Location
        header pointing at the new image entity.
        """
        context = req.environ['nova.context']
        entity = body.get("createImage", {})

        image_name = entity.get("name")

        if not image_name:
            msg = _("createImage entity requires name attribute")
            raise exc.HTTPBadRequest(explanation=msg)

        props = {}
        metadata = entity.get('metadata', {})
        # Count the requested metadata against the image-metadata quota
        # before doing any work.
        common.check_img_metadata_properties_quota(context, metadata)
        try:
            props.update(metadata)
        except ValueError:
            msg = _("Invalid metadata")
            raise exc.HTTPBadRequest(explanation=msg)

        instance = self._get_server(context, req, id)

        bdms = self.compute_api.get_instance_bdms(context, instance)

        try:
            # Volume-backed instances need a volume-aware snapshot that
            # reuses the source image's metadata as a template.
            if self.compute_api.is_volume_backed_instance(context, instance,
                                                          bdms):
                img = instance['image_ref']
                src_image = self.compute_api.image_service.show(context, img)
                image_meta = dict(src_image)

                image = self.compute_api.snapshot_volume_backed(
                                                       context,
                                                       instance,
                                                       image_meta,
                                                       image_name,
                                                       extra_properties=props)
            else:
                image = self.compute_api.snapshot(context,
                                                  instance,
                                                  image_name,
                                                  extra_properties=props)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'createImage')

        # build location of newly-created image entity
        image_id = str(image['id'])
        image_ref = os.path.join(req.application_url,
                                 context.project_id,
                                 'images',
                                 image_id)

        resp = webob.Response(status_int=202)
        resp.headers['Location'] = image_ref
        return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
    def _validate_admin_password(self, password):
        """Raise ValueError unless password is a string.

        basestring covers both str and unicode on Python 2.
        """
        if not isinstance(password, basestring):
            raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'changes-since', 'all_tenants')
def create_resource(ext_mgr):
    """Return the WSGI resource wrapping a servers Controller."""
    return wsgi.Resource(Controller(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context.

    Mutates search_options in place; admins keep every option.
    """
    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    unknown_options = [opt for opt in search_options
                       if opt not in allowed_search_options]
    unk_opt_str = ", ".join(unknown_options)
    # Interpolate from an explicit mapping instead of locals(): locals()
    # silently couples the (translatable) string to variable names and is
    # forbidden by nova's hacking checks (H501).
    log_msg = _("Removing options '%(unk_opt_str)s' from query") % {
        'unk_opt_str': unk_opt_str}
    LOG.debug(log_msg)
    for opt in unknown_options:
        search_options.pop(opt, None)
| apache-2.0 |
sserrot/champion_relationships | venv/Lib/site-packages/win32comext/directsound/test/ds_test.py | 28 | 12476 | import unittest
import struct
import sys
import os
import pywintypes
import win32event, win32api
import os
from pywin32_testutil import str2bytes, TestSkipped
import win32com.directsound.directsound as ds
# next two lines are for for debugging:
# import win32com
# import directsound as ds
# Format tag for uncompressed PCM audio in a WAV 'fmt ' chunk.
WAV_FORMAT_PCM = 1
# Size (44 bytes) of a canonical RIFF/WAVE header: RIFF chunk, 16-byte
# 'fmt ' chunk and the 'data' chunk header.
WAV_HEADER_SIZE = struct.calcsize('<4sl4s4slhhllhh4sl')
def wav_header_unpack(data):
    """Parse a canonical 44-byte WAV header.

    Returns (wfx, datalength): a pywintypes.WAVEFORMATEX describing the
    audio format, and the byte length of the 'data' chunk payload.
    Raises ValueError unless the header is a plain RIFF/WAVE header whose
    16-byte 'fmt ' chunk is directly followed by the 'data' chunk.
    """
    (riff, riffsize, wave, fmt, fmtsize, format_tag, nchannels,
     samplespersecond, datarate, blockalign, bitspersample,
     data_chunk_id, datalength) = struct.unpack('<4sl4s4slhhllhh4sl', data)

    # b'' literals work on Python 2.6+ and 3.x, so no str2bytes() needed.
    if riff != b'RIFF':
        raise ValueError('invalid wav header')

    # BUG FIX: the chunk id unpacked from the header is already bytes and
    # must be compared against b'data'; the old `str2bytes(data) != 'data'`
    # compared bytes to str (never equal on Python 3) and shadowed the
    # `data` parameter.
    if fmtsize != 16 or fmt != b'fmt ' or data_chunk_id != b'data':
        # fmt chunk is not first chunk, directly followed by data chunk
        # It is nowhere required that they are, it is just very common
        raise ValueError('cannot understand wav header')

    wfx = pywintypes.WAVEFORMATEX()
    wfx.wFormatTag = format_tag
    wfx.nChannels = nchannels
    wfx.nSamplesPerSec = samplespersecond
    wfx.nAvgBytesPerSec = datarate
    wfx.nBlockAlign = blockalign
    wfx.wBitsPerSample = bitspersample
    return wfx, datalength
def wav_header_pack(wfx, datasize):
    """Build a canonical 44-byte WAV header for `datasize` bytes of audio.

    `wfx` is any object with WAVEFORMATEX-style attributes (wFormatTag,
    nChannels, nSamplesPerSec, nAvgBytesPerSec, nBlockAlign,
    wBitsPerSample).
    """
    # Chunk ids must be bytes: struct '4s' fields reject str on Python 3,
    # and b'' literals work on Python 2.6+ as well (matches
    # wav_header_unpack).  Also drops the stray trailing semicolon.
    return struct.pack('<4sl4s4slhhllhh4sl', b'RIFF', 36 + datasize,
                       b'WAVE', b'fmt ', 16,
                       wfx.wFormatTag, wfx.nChannels, wfx.nSamplesPerSec,
                       wfx.nAvgBytesPerSec, wfx.nBlockAlign,
                       wfx.wBitsPerSample, b'data', datasize)
class WAVEFORMATTest(unittest.TestCase):
    """Construction and attribute round-tripping of WAVEFORMATEX."""

    def test_1_Type(self):
        'WAVEFORMATEX type'
        w = pywintypes.WAVEFORMATEX()
        self.assertEqual(type(w), pywintypes.WAVEFORMATEXType)

    def test_2_Attr(self):
        'WAVEFORMATEX attribute access'
        # A wav header for a soundfile from a CD should look like this...
        w = pywintypes.WAVEFORMATEX()
        w.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        w.nChannels = 2
        w.nSamplesPerSec = 44100
        w.nAvgBytesPerSec = 176400
        w.nBlockAlign = 4
        w.wBitsPerSample = 16

        # assertEqual (rather than the deprecated failUnless alias) reports
        # both values on failure.
        self.assertEqual(w.wFormatTag, 1)
        self.assertEqual(w.nChannels, 2)
        self.assertEqual(w.nSamplesPerSec, 44100)
        self.assertEqual(w.nAvgBytesPerSec, 176400)
        self.assertEqual(w.nBlockAlign, 4)
        self.assertEqual(w.wBitsPerSample, 16)
class DSCAPSTest(unittest.TestCase):
    """Construction and attribute round-tripping of DSCAPS."""

    # Every writable DSCAPS field, in declaration order; test_2_Attr sets
    # field i (1-based) to i and reads it back, replacing 21 copy-pasted
    # set/check pairs.
    _FIELDS = (
        'dwFlags', 'dwMinSecondarySampleRate', 'dwMaxSecondarySampleRate',
        'dwPrimaryBuffers', 'dwMaxHwMixingAllBuffers',
        'dwMaxHwMixingStaticBuffers', 'dwMaxHwMixingStreamingBuffers',
        'dwFreeHwMixingAllBuffers', 'dwFreeHwMixingStaticBuffers',
        'dwFreeHwMixingStreamingBuffers', 'dwMaxHw3DAllBuffers',
        'dwMaxHw3DStaticBuffers', 'dwMaxHw3DStreamingBuffers',
        'dwFreeHw3DAllBuffers', 'dwFreeHw3DStaticBuffers',
        'dwFreeHw3DStreamingBuffers', 'dwTotalHwMemBytes',
        'dwFreeHwMemBytes', 'dwMaxContigFreeHwMemBytes',
        'dwUnlockTransferRateHwBuffers', 'dwPlayCpuOverheadSwBuffers',
    )

    def test_1_Type(self):
        'DSCAPS type'
        c = ds.DSCAPS()
        self.assertEqual(type(c), ds.DSCAPSType)

    def test_2_Attr(self):
        'DSCAPS attribute access'
        c = ds.DSCAPS()
        for value, name in enumerate(self._FIELDS, 1):
            setattr(c, name, value)
        for value, name in enumerate(self._FIELDS, 1):
            self.assertEqual(getattr(c, name), value)
class DSBCAPSTest(unittest.TestCase):
    """Construction and attribute round-tripping of DSBCAPS."""

    def test_1_Type(self):
        'DSBCAPS type'
        c = ds.DSBCAPS()
        self.assertEqual(type(c), ds.DSBCAPSType)

    def test_2_Attr(self):
        'DSBCAPS attribute access'
        c = ds.DSBCAPS()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        c.dwUnlockTransferRate = 3
        c.dwPlayCpuOverhead = 4
        # assertEqual (vs the deprecated failUnless) reports both values.
        self.assertEqual(c.dwFlags, 1)
        self.assertEqual(c.dwBufferBytes, 2)
        self.assertEqual(c.dwUnlockTransferRate, 3)
        self.assertEqual(c.dwPlayCpuOverhead, 4)
class DSCCAPSTest(unittest.TestCase):
    """Construction and attribute round-tripping of DSCCAPS."""

    def test_1_Type(self):
        'DSCCAPS type'
        c = ds.DSCCAPS()
        self.assertEqual(type(c), ds.DSCCAPSType)

    def test_2_Attr(self):
        'DSCCAPS attribute access'
        c = ds.DSCCAPS()
        c.dwFlags = 1
        c.dwFormats = 2
        c.dwChannels = 4
        # assertEqual (vs the deprecated failUnless) reports both values.
        self.assertEqual(c.dwFlags, 1)
        self.assertEqual(c.dwFormats, 2)
        self.assertEqual(c.dwChannels, 4)
class DSCBCAPSTest(unittest.TestCase):
    """Construction and attribute round-tripping of DSCBCAPS."""

    def test_1_Type(self):
        'DSCBCAPS type'
        c = ds.DSCBCAPS()
        self.assertEqual(type(c), ds.DSCBCAPSType)

    def test_2_Attr(self):
        'DSCBCAPS attribute access'
        c = ds.DSCBCAPS()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        # assertEqual (vs the deprecated failUnless) reports both values.
        self.assertEqual(c.dwFlags, 1)
        self.assertEqual(c.dwBufferBytes, 2)
class DSBUFFERDESCTest(unittest.TestCase):
    """DSBUFFERDESC, including its embedded WAVEFORMATEX format."""

    def test_1_Type(self):
        'DSBUFFERDESC type'
        c = ds.DSBUFFERDESC()
        self.assertEqual(type(c), ds.DSBUFFERDESCType)

    def test_2_Attr(self):
        'DSBUFFERDESC attribute access'
        c = ds.DSBUFFERDESC()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        c.lpwfxFormat = pywintypes.WAVEFORMATEX()
        c.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        c.lpwfxFormat.nChannels = 2
        c.lpwfxFormat.nSamplesPerSec = 44100
        c.lpwfxFormat.nAvgBytesPerSec = 176400
        c.lpwfxFormat.nBlockAlign = 4
        c.lpwfxFormat.wBitsPerSample = 16
        # assertEqual (vs the deprecated failUnless) reports both values.
        self.assertEqual(c.dwFlags, 1)
        self.assertEqual(c.dwBufferBytes, 2)
        self.assertEqual(c.lpwfxFormat.wFormatTag, 1)
        self.assertEqual(c.lpwfxFormat.nChannels, 2)
        self.assertEqual(c.lpwfxFormat.nSamplesPerSec, 44100)
        self.assertEqual(c.lpwfxFormat.nAvgBytesPerSec, 176400)
        self.assertEqual(c.lpwfxFormat.nBlockAlign, 4)
        self.assertEqual(c.lpwfxFormat.wBitsPerSample, 16)

    def invalid_format(self, c):
        # Helper: lpwfxFormat rejects anything but a WAVEFORMATEX.
        c.lpwfxFormat = 17

    def test_3_invalid_format(self):
        'DSBUFFERDESC invalid lpwfxFormat assignment'
        c = ds.DSBUFFERDESC()
        self.assertRaises(ValueError, self.invalid_format, c)
class DSCBUFFERDESCTest(unittest.TestCase):
    """DSCBUFFERDESC, including its embedded WAVEFORMATEX format."""

    def test_1_Type(self):
        'DSCBUFFERDESC type'
        c = ds.DSCBUFFERDESC()
        self.assertEqual(type(c), ds.DSCBUFFERDESCType)

    def test_2_Attr(self):
        'DSCBUFFERDESC attribute access'
        c = ds.DSCBUFFERDESC()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        c.lpwfxFormat = pywintypes.WAVEFORMATEX()
        c.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        c.lpwfxFormat.nChannels = 2
        c.lpwfxFormat.nSamplesPerSec = 44100
        c.lpwfxFormat.nAvgBytesPerSec = 176400
        c.lpwfxFormat.nBlockAlign = 4
        c.lpwfxFormat.wBitsPerSample = 16
        # assertEqual (vs the deprecated failUnless) reports both values.
        self.assertEqual(c.dwFlags, 1)
        self.assertEqual(c.dwBufferBytes, 2)
        self.assertEqual(c.lpwfxFormat.wFormatTag, 1)
        self.assertEqual(c.lpwfxFormat.nChannels, 2)
        self.assertEqual(c.lpwfxFormat.nSamplesPerSec, 44100)
        self.assertEqual(c.lpwfxFormat.nAvgBytesPerSec, 176400)
        self.assertEqual(c.lpwfxFormat.nBlockAlign, 4)
        self.assertEqual(c.lpwfxFormat.wBitsPerSample, 16)

    def invalid_format(self, c):
        # Helper: lpwfxFormat rejects anything but a WAVEFORMATEX.
        c.lpwfxFormat = 17

    def test_3_invalid_format(self):
        'DSCBUFFERDESC invalid lpwfxFormat assignment'
        c = ds.DSCBUFFERDESC()
        self.assertRaises(ValueError, self.invalid_format, c)
class DirectSoundTest(unittest.TestCase):
    # basic tests - mostly just exercise the functions
    def testEnumerate(self):
        '''DirectSoundEnumerate() sanity tests'''
        devices = ds.DirectSoundEnumerate()
        # this might fail on machines without a sound card
        self.failUnless(len(devices))
        # if we have an entry, it must be a tuple of size 3
        self.failUnless(len(devices[0]) == 3)

    def testCreate(self):
        '''DirectSoundCreate()'''
        d = ds.DirectSoundCreate(None, None)

    def testPlay(self):
        '''Mesdames et Messieurs, la cour de Devin Dazzle'''
        # Plays a bundled wav end-to-end through a real DirectSound device;
        # blocks until playback finishes.
        # look for the test file in various places
        candidates = [
            os.path.dirname(__file__),
            os.path.dirname(sys.argv[0]),
            # relative to 'testall.py' in the win32com test suite.
            os.path.join(os.path.dirname(sys.argv[0]),
                         '../../win32comext/directsound/test'),
            '.',
        ]
        for candidate in candidates:
            fname=os.path.join(candidate, "01-Intro.wav")
            if os.path.isfile(fname):
                break
        else:
            raise TestSkipped("Can't find test .wav file to play")

        f = open(fname, 'rb')
        hdr = f.read(WAV_HEADER_SIZE)
        wfx, size = wav_header_unpack(hdr)

        d = ds.DirectSoundCreate(None, None)
        d.SetCooperativeLevel(None, ds.DSSCL_PRIORITY)

        sdesc = ds.DSBUFFERDESC()
        # CTRLPOSITIONNOTIFY is required for the stop notification below.
        sdesc.dwFlags = ds.DSBCAPS_STICKYFOCUS | ds.DSBCAPS_CTRLPOSITIONNOTIFY
        sdesc.dwBufferBytes = size
        sdesc.lpwfxFormat = wfx

        buffer = d.CreateSoundBuffer(sdesc, None)

        # Auto-reset event signalled when playback reaches the end of the
        # buffer (DSBPN_OFFSETSTOP).
        event = win32event.CreateEvent(None, 0, 0, None)
        notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)

        notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))

        buffer.Update(0, f.read(size))

        buffer.Play(0)

        # Wait (forever) for the stop notification.
        win32event.WaitForSingleObject(event, -1)
class DirectSoundCaptureTest(unittest.TestCase):
    # basic tests - mostly just exercise the functions
    def testEnumerate(self):
        '''DirectSoundCaptureEnumerate() sanity tests'''
        devices = ds.DirectSoundCaptureEnumerate()
        # this might fail on machines without a sound card
        self.failUnless(len(devices))
        # if we have an entry, it must be a tuple of size 3
        self.failUnless(len(devices[0]) == 3)

    def testCreate(self):
        '''DirectSoundCreate()'''
        d = ds.DirectSoundCaptureCreate(None, None)

    def testRecord(self):
        '''Capture two seconds of CD-quality audio and write it as a wav.'''
        d = ds.DirectSoundCaptureCreate(None, None)

        sdesc = ds.DSCBUFFERDESC()
        # 44100 samples/s * 4 bytes/frame * 2 s = 352800 bytes.
        sdesc.dwBufferBytes = 352800 # 2 seconds
        sdesc.lpwfxFormat = pywintypes.WAVEFORMATEX()
        sdesc.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        sdesc.lpwfxFormat.nChannels = 2
        sdesc.lpwfxFormat.nSamplesPerSec = 44100
        sdesc.lpwfxFormat.nAvgBytesPerSec = 176400
        sdesc.lpwfxFormat.nBlockAlign = 4
        sdesc.lpwfxFormat.wBitsPerSample = 16

        buffer = d.CreateCaptureBuffer(sdesc)

        # Auto-reset event signalled when capture fills the buffer.
        event = win32event.CreateEvent(None, 0, 0, None)
        notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)

        notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))

        buffer.Start(0)

        # Block until the buffer has been filled (~2 s of wall time).
        win32event.WaitForSingleObject(event, -1)
        event.Close()

        data = buffer.Update(0, 352800)

        # Dump the capture as a playable wav in the temp directory.
        fname=os.path.join(win32api.GetTempPath(), 'test_directsound_record.wav')
        f = open(fname, 'wb')
        f.write(wav_header_pack(sdesc.lpwfxFormat, 352800))
        f.write(data)
        f.close()
if __name__ == '__main__':
    # Run all DirectSound/WAVEFORMATEX tests when executed directly.
    unittest.main()
| mit |
tta/gnuradio-tta | gnuradio-core/src/python/gnuradio/gr/qa_nlog10.py | 11 | 1510 | #!/usr/bin/env python
#
# Copyright 2005,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_nlog10(gr_unittest.TestCase):
    """QA for gr.nlog10_ff (y = n * log10(x) on a float stream)."""

    def setUp (self):
        # Fresh flowgraph per test.
        self.tb = gr.top_block ()

    def tearDown (self):
        # Drop the flowgraph so its resources are released.
        self.tb = None

    def test_001(self):
        # Non-positive inputs map to -180 instead of -inf/NaN, per the
        # expected_result below -- presumably the block's built-in floor;
        # confirm against the gr.nlog10_ff documentation.
        src_data = (-10, 0, 10, 100, 1000, 10000, 100000)
        expected_result = (-180, -180, 10, 20, 30, 40, 50)
        src = gr.vector_source_f(src_data)
        op = gr.nlog10_ff(10)
        dst = gr.vector_sink_f()
        self.tb.connect (src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual (expected_result, result_data)
if __name__ == '__main__':
    # Run via gr_unittest so results are also written to the XML file.
    gr_unittest.run(test_nlog10, "test_nlog10.xml")
| gpl-3.0 |
aristotle-tek/cuny-bdif | AWS/ec2/lib/boto-2.34.0/tests/integration/swf/test_cert_verification.py | 126 | 1553 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.swf
class SWFCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Verify SSL certificates for every SWF regional endpoint."""
    # Flag read by the shared ServiceCertVerificationTest mixin.
    swf = True
    regions = boto.swf.regions()

    def sample_service_call(self, conn):
        # One cheap authenticated call per region to exercise the TLS
        # handshake and certificate validation.
        conn.list_domains('REGISTERED')
| mit |
abhisheksugam/Climate_Police | Climate_Police/tests/plot_pollutants.py | 1 | 3256 |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
from plotly.graph_objs import *
import plotly.tools as tls
import seaborn as sns
import plotly
plotly.offline.init_notebook_mode()
def plot_pollutants(df, year, state):
    """Plot yearly mean levels of NO2, O3, SO2 and CO for one state.

    :param df: pollution DataFrame with 'Date Local' (YYYY-MM-DD) and
        'State' columns; NOTE: mutated in place (year/month/day columns
        are added), same as the original implementation.
    :param year: year to plot, as a string matching the 'Date Local' year.
    :param state: state name as it appears in the 'State' column.
    :returns: (fig, status_message) where fig is the plotly Figure that
        was also rendered via py.iplot().
    """
    # Split the date into three columns (mutates the caller's df).
    df["year"], df["month"], df["day"] = zip(
        *df["Date Local"].apply(lambda d: d.split('-', 2)))

    # Multi-index (year, State) with the mean of every numeric column.
    means = df.groupby(['year', 'State']).mean()

    # Identifier columns are meaningless after averaging.
    for useless_column in ('State Code', 'County Code', 'Site Num',
                           'Unnamed: 0'):
        del means[useless_column]

    # Series of per-metric means for the requested year/state.
    stats = means.loc[year, state].round(4)

    # (series offset, line colour, line width, legend name).  The groupby
    # mean lays the series out as 4 consecutive values per pollutant --
    # presumably mean/max value/max hour/AQI; confirm against the dataset
    # schema before reordering anything.
    pollutant_specs = (
        (0, '#FFD700', 3, 'NO2'),
        (4, '#C0C0C0', 3, 'O3'),
        (8, '#BA8651', 3, 'SO2'),
        (12, '#000000', 4, 'CO'),
    )

    def make_trace(start, color, width, name):
        # One marker trace per pollutant over its 4-value slice.
        stop = start + 4
        return go.Scatter(
            x=stats.index[start:stop], y=stats[start:stop],
            mode='markers',
            marker=dict(
                size='16',
                colorscale='Viridis',
                showscale=False
            ),
            line=Line(
                color=color,
                width=width
            ),
            name=name
        )

    data = Data([make_trace(*spec) for spec in pollutant_specs])

    # One restyle button per pollutant plus "All"; each mask toggles the
    # visibility of the four traces above (same order).
    visibility_masks = (
        ('All', [True, True, True, True]),
        ('NO2', [True, False, False, False]),
        ('O3', [False, True, False, False]),
        ('SO2', [False, False, True, False]),
        ('CO', [False, False, False, True]),
    )
    buttons = [dict(args=['visible', mask], label=label, method='restyle')
               for label, mask in visibility_masks]

    layout = Layout(
        title='Levels of pollutants in ' + state + ". " + "Year: " + year,
        updatemenus=list([
            dict(
                x=-0.05,
                y=1,
                yanchor='top',
                buttons=list(buttons),
            )
        ]),
    )

    fig = Figure(data=data, layout=layout)
    py.iplot(fig)

    plot_successful = "Levels of pollutants plotted."
    return fig, plot_successful
| mit |
itu-oss-project-team/oss-github-analysis-project | github_analysis_tool/analyzer/abstract_analyzer.py | 1 | 6275 | import abc
import os.path
import time
import sys
import logging
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from github_analysis_tool import OUTPUT_DIR, secret_config
from github_analysis_tool.services.database_service import DatabaseService
from github_analysis_tool.services.graph_service import WeightedUndirectedGraph
class AbstractAnalyzer(object):
"""
An abstract class for network based analysis
All inherited classes should override create_matrix method
"""
__metaclass__ = abc.ABCMeta
    def __init__(self, name):
        """Prepare output locations for an analysis named `name`.

        Creates <OUTPUT_DIR>/<name>_matrices/ if missing and records
        <OUTPUT_DIR>/<name>_metrics.csv as the metrics target.
        """
        self._name = name
        self._databaseService = DatabaseService()
        self._matrices_folder_path = os.path.join(OUTPUT_DIR, self._name + '_matrices')
        if not os.path.exists(self._matrices_folder_path):
            os.makedirs(self._matrices_folder_path)
        self._metrics_file_path = os.path.join(OUTPUT_DIR, self._name + "_metrics.csv")
        # NOTE(review): basicConfig mutates process-wide logging state on
        # every instantiation; consider configuring logging once at startup.
        logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO,
                            filename=os.path.join(OUTPUT_DIR, "generate_matrices_log.log"))
    @abc.abstractmethod
    def create_matrix(self, repo_id) -> dict:
        """Build this analysis' network matrix for the given repository id.

        Subclasses return the matrix as a dict; analyze_repo feeds it to
        the graph builder and CSV exporter.
        """
        pass
def analyze_repo(self, repo_full_name):
repository = self._databaseService.get_repo_by_full_name(repo_full_name)
repo_id = repository['id']
repo_name_underscored = str(repo_full_name).replace("/", "_")
start_time = time.time()
print("---> Starting " + self._name + " analysis for repo: " + repo_full_name)
logging.info("---> Starting " + self._name + " analysis for repo: " + repo_full_name)
network_matrix = self.create_matrix(repo_id)
print("[" + repo_full_name + "]: " + self._name + " Matrix generated in " + "{0:.2f}".format(time.time() - start_time) + " seconds.")
logging.info("[" + repo_full_name + "]: " + self._name + " Matrix generated in " + "{0:.2f}".format(time.time() - start_time) + " seconds.")
checkpoint_time = time.time()
graph = self.__create_graph(network_matrix) # create graph of the matrix.
print("[" + repo_full_name + "]: " + self._name + " Graph generated in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
logging.info("[" + repo_full_name + "]: " + self._name + " Graph generated in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
checkpoint_time = time.time()
repo_metrics = graph.calculate_metrics()
print("[" + repo_full_name + "]: " + self._name + " Graph analyzed in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
logging.info("[" + repo_full_name + "]: " + self._name + " Graph analyzed in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
checkpoint_time = time.time()
network_file_path = os.path.join(self._matrices_folder_path, str(repo_name_underscored) + ".csv")
self.__export_csv(network_matrix, network_file_path)
print("[" + repo_full_name + "]: " + self._name + " Network matrix exported to CSV in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
logging.info("[" + repo_full_name + "]: " + self._name + " Network matrix exported to CSV in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
checkpoint_time = time.time()
graph.export_metrics(repo_metrics, repo_full_name, self._metrics_file_path)
print("[" + repo_full_name + "]: " + self._name + " Repo metrics exported to CSV in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
logging.info("[" + repo_full_name + "]: " + self._name + " Repo metrics exported to CSV in " + "{0:.2f}".format(time.time() - checkpoint_time) + " seconds.")
elapsed_time = time.time() - start_time
print("---> Finishing " + self._name + " analysis for repo: " + str(repo_full_name) + ") in " + "{0:.2f}".format(elapsed_time) + " seconds.")
print()# Empty line
directory_path = os.path.dirname(os.path.realpath(__file__))
repositories_file_path = os.path.join(directory_path, 'finished_repositories.txt')
with open('finished_repositories.txt', "a") as finished_repos:
finished_repos.write(repo_full_name + "\n")
def __create_graph(self, network_matrix):
graph = WeightedUndirectedGraph()
edge_list = set()
weight_list = []
for node_1 in network_matrix.keys():
for node_2 in network_matrix[node_1].keys():
if network_matrix[node_1][node_2] != 0:
# We need to add one single undirected edge
_edge = (node_1, node_2)
edge = tuple(sorted(_edge)) #sort the edge to get a single edge pair
if edge not in edge_list:
edge_list.add(edge)
weight_list.append(network_matrix[node_1][node_2])
graph.g.add_edge_list(edge_list, hashed=True, eprops=None)
graph.g.ep.weight.a = weight_list
return graph
def __export_csv(self, network_matrix, file_path):
"""
Generating a CSV file which is in following format: (A,B,C... are node names)
;A;B;C;D;E
A;0;1;0;1;0
B;1;0;0;0;0
C;0;0;1;0;0
D;0;1;0;1;0
E;0;0;0;0;0
:param repo_id: In order to name file
:param network_matrix: 2D dict for network matrix
:return: None
"""
nodes = network_matrix.keys()
with open(file_path, "w") as out_file:
out_file.write(";")
for file in nodes:
out_file.write("%s;" % file)
out_file.write("\n")
for node_1 in nodes:
out_file.write("%s;" % node_1)
for node_2 in nodes:
if node_1 not in network_matrix:
out_file.write("%d;" % 0)
continue
if not node_2 in network_matrix[node_1]:
out_file.write("%d;" % 0)
else:
out_file.write("%f;" % network_matrix[node_1][node_2])
out_file.write("\n") | mit |
haoyuchen1992/CourseBuilder | tests/functional/modules_search.py | 4 | 13398 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/search/."""
# Module author metadata.
__author__ = 'Ellis Michael (emichael@google.com)'
import datetime
import logging
import re
import actions
from controllers import sites
from models import courses
from models import custom_modules
from modules.announcements import announcements
from modules.search import search
from tests.unit import modules_search as search_unit_test
from google.appengine.api import namespace_manager
class SearchTest(search_unit_test.SearchTestBase):
    """Tests the search module (indexing, querying, admin dashboard hooks)."""

    # Don't require documentation for self-describing test methods.
    # pylint: disable=g-missing-docstring

    @classmethod
    def enable_module(cls):
        custom_modules.Registry.registered_modules[
            search.MODULE_NAME].enable()
        assert search.custom_module.enabled

    @classmethod
    def disable_module(cls):
        custom_modules.Registry.registered_modules[
            search.MODULE_NAME].disable()
        assert not search.custom_module.enabled

    @classmethod
    def get_xsrf_token(cls, body, form_name):
        """Extract the hidden XSRF token value from the named form's HTML."""
        match = re.search(form_name + r'.+[\n\r].+value="([^"]+)"', body)
        assert match
        return match.group(1)

    def index_test_course(self):
        """Trigger indexing of the /test course and run the deferred tasks."""
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = self.get('/test/dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        response = self.post('/test/dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.execute_all_deferred_tasks()

    def setUp(self):  # Name set by parent. pylint: disable=g-bad-name
        super(SearchTest, self).setUp()
        self.enable_module()

        self.logged_error = ''

        # Capture logged error messages so individual tests can assert on
        # them after swapping this in for logging.error.
        def error_report(string, *args, **unused_kwargs):
            self.logged_error = string % args
        self.error_report = error_report

    def test_module_disabled(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        self.disable_module()

        response = self.get('/search?query=lorem', expect_errors=True)
        self.assertEqual(response.status_code, 404)

        response = self.get('dashboard?action=search')
        self.assertIn(
            'Google &gt;<a href="%s"> Dashboard </a>&gt; Search' %
            self.canonicalize('dashboard'),
            response.body)
        self.assertNotIn('Index Course', response.body)
        self.assertNotIn('Clear Index', response.body)

    def test_module_enabled(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = self.get('course')
        self.assertIn('gcb-search-box', response.body)

        response = self.get('/search?query=lorem')
        self.assertEqual(response.status_code, 200)

        response = self.get('dashboard?action=search')
        self.assertIn(
            'Google &gt;<a href="%s"> Dashboard </a>&gt; Search' %
            self.canonicalize('dashboard'),
            response.body)
        self.assertIn('Index Course', response.body)
        self.assertIn('Clear Index', response.body)

    def test_indexing_and_clearing_buttons(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = self.get('dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        clear_token = self.get_xsrf_token(response.body, 'gcb-clear-index')

        response = self.post('dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.assertEqual(response.status_int, 302)
        response = self.post('dashboard?action=clear_index',
                             {'xsrf_token': clear_token})
        self.assertEqual(response.status_int, 302)

        # Without an XSRF token both actions must be rejected.
        # (Changed from bare asserts to assertEqual for consistency with the
        # rest of this class.)
        response = self.post('dashboard?action=index_course', {},
                             expect_errors=True)
        self.assertEqual(response.status_int, 403)
        response = self.post('dashboard?action=clear_index', {},
                             expect_errors=True)
        self.assertEqual(response.status_int, 403)

    def test_index_search_clear(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = self.get('dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        clear_token = self.get_xsrf_token(response.body, 'gcb-clear-index')
        response = self.post('dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.execute_all_deferred_tasks()

        # weather is a term found in the Power Searching Course and should not
        # be in the HTML returned by the patched urlfetch in SearchTestBase
        response = self.get('search?query=weather')
        self.assertNotIn('gcb-search-result', response.body)

        # This term should be present as it is in the dummy content.
        response = self.get('search?query=cogito%20ergo%20sum')
        self.assertIn('gcb-search-result', response.body)

        response = self.post('dashboard?action=clear_index',
                             {'xsrf_token': clear_token})
        self.execute_all_deferred_tasks()

        # After the index is cleared, it shouldn't match anything
        response = self.get('search?query=cogito%20ergo%20sum')
        self.assertNotIn('gcb-search-result', response.body)

    def test_bad_search(self):
        email = 'user@google.com'
        actions.login(email, is_admin=False)

        # %3A is a colon, and searching for only punctuation will cause App
        # Engine's search to throw an error that should be handled
        response = self.get('search?query=%3A')
        self.assertEqual(response.status_int, 200)
        self.assertIn('gcb-search-info', response.body)

    def test_errors_not_displayed_to_user(self):
        exception_code = '0xDEADBEEF'

        def bad_fetch(*unused_vargs, **unused_kwargs):
            raise Exception(exception_code)
        self.swap(search, 'fetch', bad_fetch)

        self.swap(logging, 'error', self.error_report)
        response = self.get('search?query=cogito')
        self.assertEqual(response.status_int, 200)
        self.assertIn('unavailable', response.body)
        self.assertNotIn('gcb-search-result', response.body)
        self.assertIn('gcb-search-info', response.body)
        self.assertIn(exception_code, self.logged_error)

    def test_unicode_pages(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None,
                                app_context=sites.get_all_courses()[0])
        unit = course.add_unit()
        unit.now_available = True
        lesson_a = course.add_lesson(unit)
        lesson_a.notes = search_unit_test.UNICODE_PAGE_URL
        lesson_a.now_available = True
        course.update_unit(unit)
        course.save()

        self.index_test_course()

        # A unicode page must index cleanly: no logged errors and a hit.
        self.swap(logging, 'error', self.error_report)
        response = self.get('/test/search?query=paradox')
        self.assertEqual('', self.logged_error)
        self.assertNotIn('unavailable', response.body)
        self.assertIn('gcb-search-result', response.body)

    def test_external_links(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        unit = course.add_unit()
        unit.now_available = True
        lesson_a = course.add_lesson(unit)
        lesson_a.notes = search_unit_test.VALID_PAGE_URL
        objectives_link = 'http://objectiveslink.null/'
        lesson_a.objectives = '<a href="%s"></a><a href="%s"></a>' % (
            search_unit_test.LINKED_PAGE_URL, objectives_link)
        lesson_a.now_available = True
        course.update_unit(unit)
        course.save()

        self.index_test_course()

        response = self.get('/test/search?query=What%20hath%20God%20wrought')
        self.assertIn('gcb-search-result', response.body)

        response = self.get('/test/search?query=Cogito')
        self.assertIn('gcb-search-result', response.body)
        self.assertIn(search_unit_test.VALID_PAGE_URL, response.body)
        self.assertIn(objectives_link, response.body)
        self.assertNotIn(search_unit_test.PDF_URL, response.body)

        # If this test fails, indexing will crawl the entire web
        response = self.get('/test/search?query=ABORT')
        self.assertNotIn('gcb-search-result', response.body)
        self.assertNotIn(search_unit_test.SECOND_LINK_PAGE_URL, response.body)

    def test_youtube(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        default_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace('ns_test')

            course = courses.Course(None,
                                    app_context=sites.get_all_courses()[0])
            unit = course.add_unit()
            unit.now_available = True
            lesson_a = course.add_lesson(unit)
            lesson_a.video = 'portal'
            lesson_a.now_available = True
            lesson_b = course.add_lesson(unit)
            lesson_b.objectives = '<gcb-youtube videoid="glados">'
            lesson_b.now_available = True
            course.update_unit(unit)
            course.save()

            entity = announcements.AnnouncementEntity()
            entity.html = '<gcb-youtube videoid="aperature">'
            entity.title = 'Sample Announcement'
            entity.date = datetime.datetime.now().date()
            entity.is_draft = False
            entity.put()

            self.index_test_course()

            response = self.get('/test/search?query=apple')
            self.assertIn('gcb-search-result', response.body)
            self.assertIn('start=3.14', response.body)
            self.assertIn('v=portal', response.body)
            self.assertIn('v=glados', response.body)
            self.assertIn('v=aperature', response.body)
            self.assertIn('lemon', response.body)
            self.assertIn('Medicus Quis', response.body)
            self.assertIn('- YouTube', response.body)
            self.assertIn('http://thumbnail.null', response.body)

            # Test to make sure empty notes field doesn't cause a urlfetch
            response = self.get('/test/search?query=cogito')
            self.assertNotIn('gcb-search-result', response.body)
        finally:
            namespace_manager.set_namespace(default_namespace)

    def test_announcements(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        self.get('announcements')

        response = self.get('dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        response = self.post('dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.execute_all_deferred_tasks()

        # This matches an announcement in the Power Searching course
        response = self.get(
            'search?query=Certificates%20qualifying%20participants')
        self.assertIn('gcb-search-result', response.body)
        self.assertIn('announcements#', response.body)

        # The draft announcement in Power Searching should not be indexed
        response = self.get('search?query=Welcome%20to%20the%20final%20class')
        self.assertNotIn('gcb-search-result', response.body)
        self.assertNotIn('announcements#', response.body)

    def test_private_units_and_lessons(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        unit1 = course.add_unit()
        lesson11 = course.add_lesson(unit1)
        lesson11.notes = search_unit_test.VALID_PAGE_URL
        lesson11.objectives = search_unit_test.VALID_PAGE
        lesson11.video = 'portal'
        unit2 = course.add_unit()
        lesson21 = course.add_lesson(unit2)
        lesson21.notes = search_unit_test.VALID_PAGE_URL
        lesson21.objectives = search_unit_test.VALID_PAGE
        lesson21.video = 'portal'

        # Mixed availability: public unit with private lesson, and vice
        # versa.  Neither combination may leak content into the index.
        unit1.now_available = True
        lesson11.now_available = False
        course.update_unit(unit1)
        unit2.now_available = False
        lesson21.now_available = True
        course.update_unit(unit2)
        course.save()

        self.index_test_course()

        response = self.get('/test/search?query=cogito%20ergo%20sum')
        self.assertNotIn('gcb-search-result', response.body)

        response = self.get('/test/search?query=apple')
        self.assertNotIn('gcb-search-result', response.body)
        self.assertNotIn('v=portal', response.body)
| apache-2.0 |
elelianghh/sqlalchemy | test/orm/test_subquery_relations.py | 25 | 77964 | from sqlalchemy.testing import eq_, is_, is_not_
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import Integer, String, ForeignKey, bindparam, inspect
from sqlalchemy.orm import backref, subqueryload, subqueryload_all, \
mapper, relationship, clear_mappers, create_session, lazyload, \
aliased, joinedload, deferred, undefer, eagerload_all,\
Session
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.entities import ComparableEntity
from test.orm import _fixtures
import sqlalchemy as sa
from sqlalchemy.orm import with_polymorphic
class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
run_inserts = 'once'
run_deletes = None
    def test_basic(self):
        """Subquery-load a simple one-to-many; each query emits 2 SELECTs."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses),
                order_by=Address.id)
        })
        sess = create_session()

        q = sess.query(User).options(subqueryload(User.addresses))

        def go():
            eq_(
                [User(id=7, addresses=[
                    Address(id=1, email_address='jack@bean.com')])],
                q.filter(User.id==7).all()
            )
        # One SELECT for users plus one subquery SELECT for addresses.
        self.assert_sql_count(testing.db, go, 2)

        def go():
            eq_(
                self.static.user_address_result,
                q.order_by(User.id).all()
            )
        self.assert_sql_count(testing.db, go, 2)
    def test_from_aliased(self):
        """Subquery loading works when the query root is an aliased() entity."""
        users, Dingaling, User, dingalings, Address, addresses = (
            self.tables.users,
            self.classes.Dingaling,
            self.classes.User,
            self.tables.dingalings,
            self.classes.Address,
            self.tables.addresses)

        mapper(Dingaling, dingalings)
        mapper(Address, addresses, properties={
            'dingalings': relationship(Dingaling, order_by=Dingaling.id)
        })
        mapper(User, users, properties={
            'addresses': relationship(
                Address,
                order_by=Address.id)
        })
        sess = create_session()

        u = aliased(User)

        q = sess.query(u).options(subqueryload(u.addresses))

        def go():
            eq_(
                [User(id=7, addresses=[
                    Address(id=1, email_address='jack@bean.com')])],
                q.filter(u.id==7).all()
            )
        self.assert_sql_count(testing.db, go, 2)

        def go():
            eq_(
                self.static.user_address_result,
                q.order_by(u.id).all()
            )
        self.assert_sql_count(testing.db, go, 2)

        # Two-level subqueryload chain from the alias: users, addresses,
        # then dingalings -> three SELECTs.
        q = sess.query(u).\
            options(subqueryload_all(u.addresses, Address.dingalings))

        def go():
            eq_(
                [
                    User(id=8, addresses=[
                        Address(id=2, email_address='ed@wood.com', dingalings=[Dingaling()]),
                        Address(id=3, email_address='ed@bettyboop.com'),
                        Address(id=4, email_address='ed@lala.com'),
                    ]),
                    User(id=9, addresses=[
                        Address(id=5, dingalings=[Dingaling()])
                    ]),
                ],
                q.filter(u.id.in_([8, 9])).all()
            )
        self.assert_sql_count(testing.db, go, 3)
    def test_from_get(self):
        """Query.get() honors the subqueryload option (2 SELECTs total)."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses),
                order_by=Address.id)
        })
        sess = create_session()

        q = sess.query(User).options(subqueryload(User.addresses))

        def go():
            eq_(
                User(id=7, addresses=[
                    Address(id=1, email_address='jack@bean.com')]),
                q.get(7)
            )
        self.assert_sql_count(testing.db, go, 2)
    def test_from_params(self):
        """Subquery loading works with bindparam()/params() criteria."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses),
                order_by=Address.id)
        })
        sess = create_session()

        q = sess.query(User).options(subqueryload(User.addresses))

        def go():
            eq_(
                User(id=7, addresses=[
                    Address(id=1, email_address='jack@bean.com')]),
                q.filter(User.id==bindparam('foo')).params(foo=7).one()
            )
        self.assert_sql_count(testing.db, go, 2)
    def test_disable_dynamic(self):
        """test no subquery option on a dynamic.

        A lazy="dynamic" relationship cannot be populated eagerly, so applying
        subqueryload to it must raise InvalidRequestError up front.
        """
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address, lazy="dynamic")
        })
        mapper(Address, addresses)
        sess = create_session()

        # previously this would not raise, but would emit
        # the query needlessly and put the result nowhere.
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "User.addresses' does not support object population - eager loading cannot be applied.",
            sess.query(User).options(subqueryload(User.addresses)).first,
        )
    def test_many_to_many_plain(self):
        """A lazy='subquery' many-to-many loads the full result in 2 SELECTs."""
        keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
                                                         self.tables.items,
                                                         self.tables.item_keywords,
                                                         self.classes.Keyword,
                                                         self.classes.Item)

        mapper(Keyword, keywords)
        mapper(Item, items, properties = dict(
            keywords = relationship(Keyword, secondary=item_keywords,
                                    lazy='subquery', order_by=keywords.c.id)))

        q = create_session().query(Item).order_by(Item.id)
        def go():
            eq_(self.static.item_keyword_result, q.all())
        self.assert_sql_count(testing.db, go, 2)
    def test_many_to_many_with_join(self):
        """Subquery m2m loading combined with an explicit join()/filter()."""
        keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
                                                         self.tables.items,
                                                         self.tables.item_keywords,
                                                         self.classes.Keyword,
                                                         self.classes.Item)

        mapper(Keyword, keywords)
        mapper(Item, items, properties = dict(
            keywords = relationship(Keyword, secondary=item_keywords,
                                    lazy='subquery', order_by=keywords.c.id)))

        q = create_session().query(Item).order_by(Item.id)
        def go():
            eq_(self.static.item_keyword_result[0:2],
                q.join('keywords').filter(Keyword.name == 'red').all())
        self.assert_sql_count(testing.db, go, 2)
    def test_many_to_many_with_join_alias(self):
        """Same as test_many_to_many_with_join, but using an aliased join."""
        keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
                                                         self.tables.items,
                                                         self.tables.item_keywords,
                                                         self.classes.Keyword,
                                                         self.classes.Item)

        mapper(Keyword, keywords)
        mapper(Item, items, properties = dict(
            keywords = relationship(Keyword, secondary=item_keywords,
                                    lazy='subquery', order_by=keywords.c.id)))

        q = create_session().query(Item).order_by(Item.id)
        def go():
            eq_(self.static.item_keyword_result[0:2],
                (q.join('keywords', aliased=True).
                 filter(Keyword.name == 'red')).all())
        self.assert_sql_count(testing.db, go, 2)
    def test_orderby(self):
        """Relationship-level order_by (email_address) governs collection order."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties = {
            'addresses': relationship(mapper(Address, addresses),
                                      lazy='subquery', order_by=addresses.c.email_address),
        })
        q = create_session().query(User)
        eq_([
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=2, email_address='ed@wood.com')
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ], q.order_by(User.id).all())
    def test_orderby_multi(self):
        """A multi-column relationship order_by is honored by the subquery load."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties = {
            'addresses': relationship(mapper(Address, addresses),
                                      lazy='subquery',
                                      order_by=[
                                          addresses.c.email_address,
                                          addresses.c.id]),
        })
        q = create_session().query(User)
        eq_([
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=2, email_address='ed@wood.com')
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ], q.order_by(User.id).all())
    def test_orderby_related(self):
        """A regular mapper select on a single table can
        order by a relationship to a second table"""

        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relationship(Address,
                                     lazy='subquery',
                                     order_by=addresses.c.id),
        ))

        q = create_session().query(User)
        # Outer ordering by the related email_address; inner collections
        # still ordered by the relationship's order_by (addresses.c.id).
        l = q.filter(User.id==Address.user_id).\
            order_by(Address.email_address).all()

        eq_([
            User(id=8, addresses=[
                Address(id=2, email_address='ed@wood.com'),
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=7, addresses=[
                Address(id=1)
            ]),
        ], l)
    def test_orderby_desc(self):
        """A descending relationship order_by is applied to the loaded collection."""
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relationship(Address, lazy='subquery',
                                     order_by=[
                                         sa.desc(addresses.c.email_address)
                                     ]),
        ))
        sess = create_session()
        eq_([
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=2, email_address='ed@wood.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=3, email_address='ed@bettyboop.com'),
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ], sess.query(User).order_by(User.id).all())
    # Each tuple is (orders strategy, items strategy, keywords strategy,
    # expected total SELECT count) for a 3-level User->Order->Item->Keyword
    # load; shared by the option-based and mapper-based pathing tests below.
    _pathing_runs = [
        ("lazyload", "lazyload", "lazyload", 15),
        ("subqueryload", "lazyload", "lazyload", 12),
        ("subqueryload", "subqueryload", "lazyload", 8),
        ("joinedload", "subqueryload", "lazyload", 7),
        ("lazyload", "lazyload", "subqueryload", 12),
        ("subqueryload", "subqueryload", "subqueryload", 4),
        ("subqueryload", "subqueryload", "joinedload", 3),
    ]

    def test_options_pathing(self):
        # Strategies applied per-query via Query.options().
        self._do_options_test(self._pathing_runs)

    def test_mapper_pathing(self):
        # Strategies configured on the mappers themselves.
        self._do_mapper_test(self._pathing_runs)
    def _do_options_test(self, configs):
        """Run the pathing configs using Query.options() loader strategies.

        ``configs`` is a list of (orders, items, keywords, expected SQL count)
        tuples; 'lazyload' entries simply apply no option.
        """
        users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords = (self.tables.users,
                                self.classes.Keyword,
                                self.tables.orders,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.keywords,
                                self.tables.item_keywords)

        mapper(User, users, properties={
            'orders':relationship(Order, order_by=orders.c.id), # o2m, m2o
        })
        mapper(Order, orders, properties={
            'items':relationship(Item,
                        secondary=order_items, order_by=items.c.id), #m2m
        })
        mapper(Item, items, properties={
            'keywords':relationship(Keyword,
                                        secondary=item_keywords,
                                        order_by=keywords.c.id) #m2m
        })
        mapper(Keyword, keywords)

        callables = {
            'joinedload':joinedload,
            'subqueryload':subqueryload
        }

        for o, i, k, count in configs:
            options = []
            if o in callables:
                options.append(callables[o](User.orders))
            if i in callables:
                options.append(callables[i](User.orders, Order.items))
            if k in callables:
                options.append(callables[k](User.orders, Order.items, Item.keywords))
            self._do_query_tests(options, count)
    def _do_mapper_test(self, configs):
        """Run the pathing configs by configuring lazy= on the mappers.

        Mappers are rebuilt (and cleared) for every config tuple since the
        strategy is baked into the relationship definitions here.
        """
        users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords = (self.tables.users,
                                self.classes.Keyword,
                                self.tables.orders,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.keywords,
                                self.tables.item_keywords)

        # Map strategy names used in _pathing_runs to relationship() lazy= values.
        opts = {
            'lazyload':'select',
            'joinedload':'joined',
            'subqueryload':'subquery',
        }

        for o, i, k, count in configs:
            mapper(User, users, properties={
                'orders':relationship(Order, lazy=opts[o], order_by=orders.c.id),
            })
            mapper(Order, orders, properties={
                'items':relationship(Item,
                            secondary=order_items, lazy=opts[i], order_by=items.c.id),
            })
            mapper(Item, items, properties={
                'keywords':relationship(Keyword,
                                            lazy=opts[k],
                                            secondary=item_keywords,
                                            order_by=keywords.c.id)
            })
            mapper(Keyword, keywords)

            try:
                self._do_query_tests([], count)
            finally:
                clear_mappers()
    def _do_query_tests(self, opts, count):
        """Assert the three standard queries under the given loader options.

        ``count`` is the expected SQL statement count for the unfiltered
        full-result query; the filtered variants only check results.
        """
        Order, User = self.classes.Order, self.classes.User

        sess = create_session()
        def go():
            eq_(
                sess.query(User).options(*opts).order_by(User.id).all(),
                self.static.user_item_keyword_result
            )
        self.assert_sql_count(testing.db, go, count)

        eq_(
            sess.query(User).options(*opts).filter(User.name=='fred').
                order_by(User.id).all(),
            self.static.user_item_keyword_result[2:3]
        )
        sess = create_session()
        eq_(
            sess.query(User).options(*opts).join(User.orders).
                filter(Order.id==3).\
                order_by(User.id).all(),
            self.static.user_item_keyword_result[0:1]
        )
    def test_cyclical(self):
        """A circular eager relationship breaks the cycle with a lazy loader"""

        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        # Both directions configured lazy='subquery' via backref.
        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relationship(Address, lazy='subquery',
                                     backref=sa.orm.backref('user', lazy='subquery'),
                                     order_by=Address.id)
        ))
        is_(sa.orm.class_mapper(User).get_property('addresses').lazy, 'subquery')
        is_(sa.orm.class_mapper(Address).get_property('user').lazy, 'subquery')

        sess = create_session()
        eq_(self.static.user_address_result, sess.query(User).order_by(User.id).all())
    def test_double(self):
        """Eager loading with two relationships simultaneously,
        from the same table, using aliases."""

        users, orders, User, Address, Order, addresses = (self.tables.users,
                                                          self.tables.orders,
                                                          self.classes.User,
                                                          self.classes.Address,
                                                          self.classes.Order,
                                                          self.tables.addresses)

        # Two aliases of the orders table back two distinct relationships,
        # each with its own non-primary mapper and primaryjoin.
        openorders = sa.alias(orders, 'openorders')
        closedorders = sa.alias(orders, 'closedorders')

        mapper(Address, addresses)
        mapper(Order, orders)

        open_mapper = mapper(Order, openorders, non_primary=True)
        closed_mapper = mapper(Order, closedorders, non_primary=True)
        mapper(User, users, properties = dict(
            addresses = relationship(Address, lazy='subquery',
                                     order_by=addresses.c.id),
            open_orders = relationship(
                open_mapper,
                primaryjoin=sa.and_(openorders.c.isopen == 1,
                                    users.c.id==openorders.c.user_id),
                lazy='subquery', order_by=openorders.c.id),
            closed_orders = relationship(
                closed_mapper,
                primaryjoin=sa.and_(closedorders.c.isopen == 0,
                                    users.c.id==closedorders.c.user_id),
                lazy='subquery', order_by=closedorders.c.id)))

        q = create_session().query(User).order_by(User.id)

        def go():
            eq_([
                User(
                    id=7,
                    addresses=[Address(id=1)],
                    open_orders = [Order(id=3)],
                    closed_orders = [Order(id=1), Order(id=5)]
                ),
                User(
                    id=8,
                    addresses=[Address(id=2), Address(id=3), Address(id=4)],
                    open_orders = [],
                    closed_orders = []
                ),
                User(
                    id=9,
                    addresses=[Address(id=5)],
                    open_orders = [Order(id=4)],
                    closed_orders = [Order(id=2)]
                ),
                User(id=10)
            ], q.all())
        # One SELECT for users + one subquery SELECT per relationship.
        self.assert_sql_count(testing.db, go, 4)
    def test_double_same_mappers(self):
        """Eager loading with two relationships simultaneously,
        from the same table, sharing the same (primary) mappers."""

        addresses, items, order_items, orders, Item, User, Address, Order, users = (self.tables.addresses,
                                self.tables.items,
                                self.tables.order_items,
                                self.tables.orders,
                                self.classes.Item,
                                self.classes.User,
                                self.classes.Address,
                                self.classes.Order,
                                self.tables.users)

        mapper(Address, addresses)
        # Both open_orders and closed_orders target the same Order mapper,
        # distinguished only by their primaryjoin conditions.
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items, lazy='subquery',
                                  order_by=items.c.id)})
        mapper(Item, items)
        mapper(User, users, properties=dict(
            addresses=relationship(Address, lazy='subquery', order_by=addresses.c.id),
            open_orders=relationship(
                Order,
                primaryjoin=sa.and_(orders.c.isopen == 1,
                                    users.c.id==orders.c.user_id),
                lazy='subquery', order_by=orders.c.id),
            closed_orders=relationship(
                Order,
                primaryjoin=sa.and_(orders.c.isopen == 0,
                                    users.c.id==orders.c.user_id),
                lazy='subquery', order_by=orders.c.id)))
        q = create_session().query(User).order_by(User.id)

        def go():
            eq_([
                User(id=7,
                     addresses=[
                         Address(id=1)],
                     open_orders=[Order(id=3,
                                        items=[
                                            Item(id=3),
                                            Item(id=4),
                                            Item(id=5)])],
                     closed_orders=[Order(id=1,
                                          items=[
                                              Item(id=1),
                                              Item(id=2),
                                              Item(id=3)]),
                                    Order(id=5,
                                          items=[
                                              Item(id=5)])]),
                User(id=8,
                     addresses=[
                         Address(id=2),
                         Address(id=3),
                         Address(id=4)],
                     open_orders = [],
                     closed_orders = []),
                User(id=9,
                     addresses=[
                         Address(id=5)],
                     open_orders=[
                         Order(id=4,
                               items=[
                                   Item(id=1),
                                   Item(id=5)])],
                     closed_orders=[
                         Order(id=2,
                               items=[
                                   Item(id=1),
                                   Item(id=2),
                                   Item(id=3)])]),
                User(id=10)
            ], q.all())
        # users + addresses + 2x orders + 2x (orders->items) = 6 SELECTs.
        self.assert_sql_count(testing.db, go, 6)
    def test_limit(self):
        """Limit operations combined with lazy-load relationships."""

        users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users,
                                self.tables.items,
                                self.tables.order_items,
                                self.tables.orders,
                                self.classes.Item,
                                self.classes.User,
                                self.classes.Address,
                                self.classes.Order,
                                self.tables.addresses)

        mapper(Item, items)
        mapper(Order, orders, properties={
            'items':relationship(Item, secondary=order_items, lazy='subquery',
                                 order_by=items.c.id)
        })
        mapper(User, users, properties={
            'addresses':relationship(mapper(Address, addresses),
                                     lazy='subquery',
                                     order_by=addresses.c.id),
            'orders':relationship(Order, lazy='select', order_by=orders.c.id)
        })

        sess = create_session()
        q = sess.query(User)

        # LIMIT/OFFSET applies to the parent query; the subquery load must
        # restrict to the same window of parent rows.
        l = q.order_by(User.id).limit(2).offset(1).all()
        eq_(self.static.user_all_result[1:3], l)

        sess = create_session()
        l = q.order_by(sa.desc(User.id)).limit(2).offset(2).all()
        eq_(list(reversed(self.static.user_all_result[0:2])), l)
    def test_mapper_order_by(self):
        """A mapper-level order_by (descending id) interacts correctly with limit()."""
        users, User, Address, addresses = (self.tables.users,
                                           self.classes.User,
                                           self.classes.Address,
                                           self.tables.addresses)

        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses':relationship(Address,
                                     lazy='subquery',
                                     order_by=addresses.c.id),
        }, order_by=users.c.id.desc())

        sess = create_session()
        q = sess.query(User)

        l = q.limit(2).all()
        eq_(l, list(reversed(self.static.user_address_result[2:4])))
def test_one_to_many_scalar(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(User, users, properties = dict(
address = relationship(mapper(Address, addresses),
lazy='subquery', uselist=False)
))
q = create_session().query(User)
def go():
l = q.filter(users.c.id == 7).all()
eq_([User(id=7, address=Address(id=1))], l)
self.assert_sql_count(testing.db, go, 2)
def test_many_to_one(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties = dict(
user = relationship(mapper(User, users), lazy='subquery')
))
sess = create_session()
q = sess.query(Address)
def go():
a = q.filter(addresses.c.id==1).one()
is_not_(a.user, None)
u1 = sess.query(User).get(7)
is_(a.user, u1)
self.assert_sql_count(testing.db, go, 2)
    def test_double_with_aggregate(self):
        """Two subquery eager loads on the same class, one against a
        non-primary mapper over an aggregate-derived selectable."""
        User, users, orders, Order = (self.classes.User,
                                self.tables.users,
                                self.tables.orders,
                                self.classes.Order)

        # derived table: max order id per user
        max_orders_by_user = sa.select([sa.func.max(orders.c.id).label('order_id')],
                                        group_by=[orders.c.user_id]
                                        ).alias('max_orders_by_user')

        # orders rows restricted to each user's max order
        max_orders = orders.select(orders.c.id==max_orders_by_user.c.order_id).\
                                alias('max_orders')

        mapper(Order, orders)
        mapper(User, users, properties={
            'orders':relationship(Order, backref='user', lazy='subquery',
                                        order_by=orders.c.id),
            # scalar relationship through a non-primary mapper onto the
            # 'max_orders' derived table
            'max_order':relationship(
                                mapper(Order, max_orders, non_primary=True),
                                lazy='subquery', uselist=False)
        })

        q = create_session().query(User)

        def go():
            eq_([
                User(id=7, orders=[
                        Order(id=1),
                        Order(id=3),
                        Order(id=5),
                    ],
                    max_order=Order(id=5)
                ),
                User(id=8, orders=[]),
                User(id=9, orders=[Order(id=2),Order(id=4)],
                    max_order=Order(id=4)
                ),
                User(id=10),
            ], q.order_by(User.id).all())
        # one SELECT for users + one per subquery-loaded relationship
        self.assert_sql_count(testing.db, go, 3)
def test_uselist_false_warning(self):
"""test that multiple rows received by a
uselist=False raises a warning."""
User, users, orders, Order = (self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order)
mapper(User, users, properties={
'order':relationship(Order, uselist=False)
})
mapper(Order, orders)
s = create_session()
assert_raises(sa.exc.SAWarning,
s.query(User).options(subqueryload(User.order)).all)
class LoadOnExistingTest(_fixtures.FixtureTest):
    """test that loaders from a base Query fully populate."""

    # fixture rows are inserted once and never deleted between tests
    run_inserts = 'once'
    run_deletes = None

    def _collection_to_scalar_fixture(self):
        # User.addresses (collection) -> Address.dingaling (scalar);
        # no eager loading configured at the mapper level.
        User, Address, Dingaling = self.classes.User, \
            self.classes.Address, self.classes.Dingaling
        mapper(User, self.tables.users, properties={
            'addresses':relationship(Address),
        })
        mapper(Address, self.tables.addresses, properties={
            'dingaling':relationship(Dingaling)
        })
        mapper(Dingaling, self.tables.dingalings)

        sess = Session(autoflush=False)
        return User, Address, Dingaling, sess

    def _collection_to_collection_fixture(self):
        # User.orders (collection) -> Order.items (m2m collection);
        # no eager loading configured at the mapper level.
        User, Order, Item = self.classes.User, \
            self.classes.Order, self.classes.Item
        mapper(User, self.tables.users, properties={
            'orders':relationship(Order),
        })
        mapper(Order, self.tables.orders, properties={
            'items':relationship(Item, secondary=self.tables.order_items),
        })
        mapper(Item, self.tables.items)

        sess = Session(autoflush=False)
        return User, Order, Item, sess

    def _eager_config_fixture(self):
        # User.addresses eagerly loaded via subquery at the mapper level
        User, Address = self.classes.User, self.classes.Address
        mapper(User, self.tables.users, properties={
            'addresses':relationship(Address, lazy="subquery"),
        })
        mapper(Address, self.tables.addresses)
        sess = Session(autoflush=False)
        return User, Address, sess

    def _deferred_config_fixture(self):
        # like _eager_config_fixture, with User.name a deferred column
        User, Address = self.classes.User, self.classes.Address
        mapper(User, self.tables.users, properties={
            'name':deferred(self.tables.users.c.name),
            'addresses':relationship(Address, lazy="subquery"),
        })
        mapper(Address, self.tables.addresses)
        sess = Session(autoflush=False)
        return User, Address, sess

    def test_no_query_on_refresh(self):
        # refreshing an expired instance should use one SELECT only and
        # must not re-run the subquery loader for 'addresses'
        User, Address, sess = self._eager_config_fixture()

        u1 = sess.query(User).get(8)
        assert 'addresses' in u1.__dict__
        sess.expire(u1)
        def go():
            eq_(u1.id, 8)
        self.assert_sql_count(testing.db, go, 1)
        assert 'addresses' not in u1.__dict__

    def test_no_query_on_deferred(self):
        # loading the deferred 'name' column must not trigger the
        # subquery loader for the separately-expired 'addresses'
        User, Address, sess = self._deferred_config_fixture()
        u1 = sess.query(User).get(8)
        assert 'addresses' in u1.__dict__
        sess.expire(u1, ['addresses'])
        def go():
            eq_(u1.name, 'ed')
        self.assert_sql_count(testing.db, go, 1)
        assert 'addresses' not in u1.__dict__

    def test_populate_existing_propagate(self):
        # populate_existing() reverts in-memory changes on the
        # collection and on related objects
        User, Address, sess = self._eager_config_fixture()
        u1 = sess.query(User).get(8)
        u1.addresses[2].email_address = "foofoo"
        del u1.addresses[1]
        u1 = sess.query(User).populate_existing().filter_by(id=8).one()
        # collection is reverted
        eq_(len(u1.addresses), 3)

        # attributes on related items reverted
        eq_(u1.addresses[2].email_address, "ed@lala.com")

    def test_loads_second_level_collection_to_scalar(self):
        # a pending Address appended to the loaded collection survives
        # the subquery load and does not get 'dingaling' populated;
        # persistent members do
        User, Address, Dingaling, sess = self._collection_to_scalar_fixture()

        u1 = sess.query(User).get(8)
        a1 = Address()
        u1.addresses.append(a1)
        a2 = u1.addresses[0]
        a2.email_address = 'foo'
        sess.query(User).options(subqueryload_all("addresses.dingaling")).\
                    filter_by(id=8).all()
        assert u1.addresses[-1] is a1
        for a in u1.addresses:
            if a is not a1:
                assert 'dingaling' in a.__dict__
            else:
                assert 'dingaling' not in a.__dict__
            if a is a2:
                # unflushed attribute change also survives
                eq_(a2.email_address, 'foo')

    def test_loads_second_level_collection_to_collection(self):
        # same as above but the second level is a collection ('items')
        User, Order, Item, sess = self._collection_to_collection_fixture()

        u1 = sess.query(User).get(7)
        u1.orders
        o1 = Order()
        u1.orders.append(o1)
        sess.query(User).options(subqueryload_all("orders.items")).\
                    filter_by(id=7).all()
        for o in u1.orders:
            if o is not o1:
                assert 'items' in o.__dict__
            else:
                assert 'items' not in o.__dict__

    def test_load_two_levels_collection_to_scalar(self):
        # a second query with a deeper subqueryload path populates the
        # second level on the already-loaded first-level objects
        User, Address, Dingaling, sess = self._collection_to_scalar_fixture()

        u1 = sess.query(User).filter_by(id=8).options(subqueryload("addresses")).one()
        sess.query(User).filter_by(id=8).options(subqueryload_all("addresses.dingaling")).first()
        assert 'dingaling' in u1.addresses[0].__dict__

    def test_load_two_levels_collection_to_collection(self):
        User, Order, Item, sess = self._collection_to_collection_fixture()

        u1 = sess.query(User).filter_by(id=7).options(subqueryload("orders")).one()
        sess.query(User).filter_by(id=7).options(subqueryload_all("orders.items")).first()
        assert 'items' in u1.orders[0].__dict__
class OrderBySecondaryTest(fixtures.MappedTest):
    """Ordering a many-to-many collection by a column on the
    association (secondary) table."""

    @classmethod
    def define_tables(cls, metadata):
        Table('m2m', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('aid', Integer, ForeignKey('a.id')),
            Column('bid', Integer, ForeignKey('b.id')))

        Table('a', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(50)))
        Table('b', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(50)))

    @classmethod
    def fixtures(cls):
        # m2m rows are deliberately listed out of id order so that
        # order_by=m2m.c.id in test_ordering is meaningful
        return dict(
            a=(('id', 'data'),
                (1, 'a1'),
                (2, 'a2')),

            b=(('id', 'data'),
                (1, 'b1'),
                (2, 'b2'),
                (3, 'b3'),
                (4, 'b4')),

            m2m=(('id', 'aid', 'bid'),
                (2, 1, 1),
                (4, 2, 4),
                (1, 1, 3),
                (6, 2, 2),
                (3, 1, 2),
                (5, 2, 3)))

    def test_ordering(self):
        a, m2m, b = (self.tables.a,
                        self.tables.m2m,
                        self.tables.b)

        class A(fixtures.ComparableEntity):pass
        class B(fixtures.ComparableEntity):pass

        mapper(A, a, properties={
            # collection order follows the association row id, not B.id
            'bs':relationship(B, secondary=m2m, lazy='subquery', order_by=m2m.c.id)
        })
        mapper(B, b)

        sess = create_session()
        def go():
            eq_(sess.query(A).all(), [
                A(data='a1', bs=[B(data='b3'), B(data='b1'), B(data='b2')]),
                A(bs=[B(data='b4'), B(data='b3'), B(data='b2')])
            ])
        self.assert_sql_count(testing.db, go, 2)
from .inheritance._poly_fixtures import _Polymorphic, Person, Engineer, \
Paperwork, Machine, MachineType, Company
class BaseRelationFromJoinedSubclassTest(_Polymorphic):
    """subqueryload() of a base-class relationship when the query is
    against a joined-inheritance subclass: the inner (origin) query of
    the subquery load must keep the subclass JOIN and criteria."""

    @classmethod
    def define_tables(cls, metadata):
        people = Table('people', metadata,
            Column('person_id', Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(30)))

        # to test fully, PK of engineers table must be
        # named differently from that of people
        engineers = Table('engineers', metadata,
            Column('engineer_id', Integer,
                ForeignKey('people.person_id'),
                primary_key=True),
            Column('primary_language', String(50)))

        paperwork = Table('paperwork', metadata,
            Column('paperwork_id', Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('description', String(50)),
            Column('person_id', Integer,
                ForeignKey('people.person_id')))

    @classmethod
    def setup_mappers(cls):
        people = cls.tables.people
        engineers = cls.tables.engineers
        paperwork = cls.tables.paperwork

        # 'paperwork' is defined on the base Person mapper
        mapper(Person, people,
            polymorphic_on=people.c.type,
            polymorphic_identity='person',
            properties={
                'paperwork': relationship(
                    Paperwork, order_by=paperwork.c.paperwork_id)})

        mapper(Engineer, engineers,
            inherits=Person,
            polymorphic_identity='engineer')

        mapper(Paperwork, paperwork)

    @classmethod
    def insert_data(cls):
        e1 = Engineer(primary_language="java")
        e2 = Engineer(primary_language="c++")
        e1.paperwork = [Paperwork(description="tps report #1"),
                        Paperwork(description="tps report #2")]
        e2.paperwork = [Paperwork(description="tps report #3")]
        sess = create_session()
        sess.add_all([e1, e2])
        sess.flush()

    def test_correct_subquery_nofrom(self):
        sess = create_session()
        # use Person.paperwork here just to give the least
        # amount of context
        q = sess.query(Engineer).\
            filter(Engineer.primary_language == 'java').\
            options(subqueryload(Person.paperwork))

        def go():
            eq_(q.all()[0].paperwork,
                [Paperwork(description="tps report #1"),
                 Paperwork(description="tps report #2")],
            )
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT people.person_id AS people_person_id, "
                "people.name AS people_name, people.type AS people_type, "
                "engineers.engineer_id AS engineers_engineer_id, "
                "engineers.primary_language AS engineers_primary_language "
                "FROM people JOIN engineers ON "
                "people.person_id = engineers.engineer_id "
                "WHERE engineers.primary_language = :primary_language_1",
                {"primary_language_1": "java"}
            ),
            # ensure we get "people JOIN engineer" here, even though
            # primary key "people.person_id" is against "Person"
            # *and* the path comes out as "Person.paperwork", still
            # want to select from "Engineer" entity
            CompiledSQL(
                "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, "
                "paperwork.description AS paperwork_description, "
                "paperwork.person_id AS paperwork_person_id, "
                "anon_1.people_person_id AS anon_1_people_person_id "
                "FROM (SELECT people.person_id AS people_person_id "
                "FROM people JOIN engineers "
                "ON people.person_id = engineers.engineer_id "
                "WHERE engineers.primary_language = "
                ":primary_language_1) AS anon_1 "
                "JOIN paperwork "
                "ON anon_1.people_person_id = paperwork.person_id "
                "ORDER BY anon_1.people_person_id, paperwork.paperwork_id",
                {"primary_language_1": "java"}
            )
        )

    def test_correct_subquery_existingfrom(self):
        # like test_correct_subquery_nofrom, but the outer query already
        # joins to paperwork; that JOIN must carry into the inner query
        sess = create_session()
        # use Person.paperwork here just to give the least
        # amount of context
        q = sess.query(Engineer).\
            filter(Engineer.primary_language == 'java').\
            join(Engineer.paperwork).\
            filter(Paperwork.description == "tps report #2").\
            options(subqueryload(Person.paperwork))

        def go():
            eq_(q.one().paperwork,
                [Paperwork(description="tps report #1"),
                 Paperwork(description="tps report #2")],
            )
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT people.person_id AS people_person_id, "
                "people.name AS people_name, people.type AS people_type, "
                "engineers.engineer_id AS engineers_engineer_id, "
                "engineers.primary_language AS engineers_primary_language "
                "FROM people JOIN engineers "
                "ON people.person_id = engineers.engineer_id "
                "JOIN paperwork ON people.person_id = paperwork.person_id "
                "WHERE engineers.primary_language = :primary_language_1 "
                "AND paperwork.description = :description_1",
                {"primary_language_1": "java",
                 "description_1": "tps report #2"}
            ),
            CompiledSQL(
                "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, "
                "paperwork.description AS paperwork_description, "
                "paperwork.person_id AS paperwork_person_id, "
                "anon_1.people_person_id AS anon_1_people_person_id "
                "FROM (SELECT people.person_id AS people_person_id "
                "FROM people JOIN engineers ON people.person_id = "
                "engineers.engineer_id JOIN paperwork "
                "ON people.person_id = paperwork.person_id "
                "WHERE engineers.primary_language = :primary_language_1 AND "
                "paperwork.description = :description_1) AS anon_1 "
                "JOIN paperwork ON anon_1.people_person_id = "
                "paperwork.person_id "
                "ORDER BY anon_1.people_person_id, paperwork.paperwork_id",
                {"primary_language_1": "java",
                 "description_1": "tps report #2"}
            )
        )

    def test_correct_subquery_with_polymorphic_no_alias(self):
        # test #3106
        sess = create_session()
        wp = with_polymorphic(Person, [Engineer])
        q = sess.query(wp).\
            options(subqueryload(wp.paperwork)).\
            order_by(Engineer.primary_language.desc())

        def go():
            eq_(q.first(),
                Engineer(
                    paperwork=[
                        Paperwork(description="tps report #1"),
                        Paperwork(description="tps report #2")],
                    primary_language='java'
                )
            )
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT people.person_id AS people_person_id, "
                "people.name AS people_name, people.type AS people_type, "
                "engineers.engineer_id AS engineers_engineer_id, "
                "engineers.primary_language AS engineers_primary_language "
                "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
                "engineers.engineer_id ORDER BY engineers.primary_language "
                "DESC LIMIT :param_1"),
            CompiledSQL(
                "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, "
                "paperwork.description AS paperwork_description, "
                "paperwork.person_id AS paperwork_person_id, "
                "anon_1.people_person_id AS anon_1_people_person_id FROM "
                "(SELECT people.person_id AS people_person_id FROM people "
                "LEFT OUTER JOIN engineers ON people.person_id = "
                "engineers.engineer_id ORDER BY engineers.primary_language "
                "DESC LIMIT :param_1) AS anon_1 JOIN paperwork "
                "ON anon_1.people_person_id = paperwork.person_id "
                "ORDER BY anon_1.people_person_id, paperwork.paperwork_id")
        )

    def test_correct_subquery_with_polymorphic_alias(self):
        # test #3106
        sess = create_session()
        wp = with_polymorphic(Person, [Engineer], aliased=True)
        q = sess.query(wp).\
            options(subqueryload(wp.paperwork)).\
            order_by(wp.Engineer.primary_language.desc())

        def go():
            eq_(q.first(),
                Engineer(
                    paperwork=[
                        Paperwork(description="tps report #1"),
                        Paperwork(description="tps report #2")],
                    primary_language='java'
                )
            )
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT anon_1.people_person_id AS anon_1_people_person_id, "
                "anon_1.people_name AS anon_1_people_name, "
                "anon_1.people_type AS anon_1_people_type, "
                "anon_1.engineers_engineer_id AS anon_1_engineers_engineer_id, "
                "anon_1.engineers_primary_language "
                "AS anon_1_engineers_primary_language FROM "
                "(SELECT people.person_id AS people_person_id, "
                "people.name AS people_name, people.type AS people_type, "
                "engineers.engineer_id AS engineers_engineer_id, "
                "engineers.primary_language AS engineers_primary_language "
                "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
                "engineers.engineer_id) AS anon_1 "
                "ORDER BY anon_1.engineers_primary_language DESC "
                "LIMIT :param_1"),
            CompiledSQL(
                "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, "
                "paperwork.description AS paperwork_description, "
                "paperwork.person_id AS paperwork_person_id, "
                "anon_1.anon_2_people_person_id AS "
                "anon_1_anon_2_people_person_id FROM "
                "(SELECT DISTINCT anon_2.people_person_id AS "
                "anon_2_people_person_id, "
                "anon_2.engineers_primary_language AS "
                "anon_2_engineers_primary_language FROM "
                "(SELECT people.person_id AS people_person_id, "
                "people.name AS people_name, people.type AS people_type, "
                "engineers.engineer_id AS engineers_engineer_id, "
                "engineers.primary_language AS engineers_primary_language "
                "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
                "engineers.engineer_id) AS anon_2 "
                "ORDER BY anon_2.engineers_primary_language "
                "DESC LIMIT :param_1) AS anon_1 "
                "JOIN paperwork "
                "ON anon_1.anon_2_people_person_id = paperwork.person_id "
                "ORDER BY anon_1.anon_2_people_person_id, "
                "paperwork.paperwork_id")
        )

    def test_correct_subquery_with_polymorphic_flat_alias(self):
        # test #3106
        sess = create_session()
        wp = with_polymorphic(Person, [Engineer], aliased=True, flat=True)
        q = sess.query(wp).\
            options(subqueryload(wp.paperwork)).\
            order_by(wp.Engineer.primary_language.desc())

        def go():
            eq_(q.first(),
                Engineer(
                    paperwork=[
                        Paperwork(description="tps report #1"),
                        Paperwork(description="tps report #2")],
                    primary_language='java'
                )
            )
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT people_1.person_id AS people_1_person_id, "
                "people_1.name AS people_1_name, "
                "people_1.type AS people_1_type, "
                "engineers_1.engineer_id AS engineers_1_engineer_id, "
                "engineers_1.primary_language AS engineers_1_primary_language "
                "FROM people AS people_1 "
                "LEFT OUTER JOIN engineers AS engineers_1 "
                "ON people_1.person_id = engineers_1.engineer_id "
                "ORDER BY engineers_1.primary_language DESC LIMIT :param_1"),
            CompiledSQL(
                "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, "
                "paperwork.description AS paperwork_description, "
                "paperwork.person_id AS paperwork_person_id, "
                "anon_1.people_1_person_id AS anon_1_people_1_person_id "
                "FROM (SELECT people_1.person_id AS people_1_person_id "
                "FROM people AS people_1 "
                "LEFT OUTER JOIN engineers AS engineers_1 "
                "ON people_1.person_id = engineers_1.engineer_id "
                "ORDER BY engineers_1.primary_language DESC LIMIT :param_1) "
                "AS anon_1 JOIN paperwork ON anon_1.people_1_person_id = "
                "paperwork.person_id ORDER BY anon_1.people_1_person_id, "
                "paperwork.paperwork_id"
            )
        )
class SubRelationFromJoinedSubclassMultiLevelTest(_Polymorphic):
    """Chained subqueryload() through a subclass relationship across
    several levels: Company -> Engineer.machines -> Machine.type."""

    @classmethod
    def define_tables(cls, metadata):
        Table('companies', metadata,
            Column('company_id', Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50)))

        Table('people', metadata,
            Column('person_id', Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('company_id', ForeignKey('companies.company_id')),
            Column('name', String(50)),
            Column('type', String(30)))

        Table('engineers', metadata,
            Column('engineer_id', ForeignKey('people.person_id'),
                primary_key=True),
            Column('primary_language', String(50)))

        Table('machines', metadata,
            Column('machine_id',
                Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('engineer_id', ForeignKey('engineers.engineer_id')),
            Column('machine_type_id',
                ForeignKey('machine_type.machine_type_id')))

        Table('machine_type', metadata,
            Column('machine_type_id',
                Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50)))

    @classmethod
    def setup_mappers(cls):
        companies = cls.tables.companies
        people = cls.tables.people
        engineers = cls.tables.engineers
        machines = cls.tables.machines
        machine_type = cls.tables.machine_type

        mapper(Company, companies, properties={
            'employees': relationship(Person, order_by=people.c.person_id)
        })
        # Person loads with all subclass tables joined in ('*')
        mapper(Person, people,
            polymorphic_on=people.c.type,
            polymorphic_identity='person',
            with_polymorphic='*')

        # 'machines' is defined on the Engineer subclass only
        mapper(Engineer, engineers,
            inherits=Person,
            polymorphic_identity='engineer', properties={
                'machines': relationship(Machine,
                                        order_by=machines.c.machine_id)
            })

        mapper(Machine, machines, properties={
            'type': relationship(MachineType)
        })
        mapper(MachineType, machine_type)

    @classmethod
    def insert_data(cls):
        c1 = cls._fixture()
        sess = create_session()
        sess.add(c1)
        sess.flush()

    @classmethod
    def _fixture(cls):
        # one company, two engineers, two machines each; also used as
        # the expected value in test_chained_subq_subclass
        mt1 = MachineType(name='mt1')
        mt2 = MachineType(name='mt2')
        return Company(
            employees=[
                Engineer(
                    name='e1',
                    machines=[
                        Machine(name='m1', type=mt1),
                        Machine(name='m2', type=mt2)
                    ]
                ),
                Engineer(
                    name='e2',
                    machines=[
                        Machine(name='m3', type=mt1),
                        Machine(name='m4', type=mt1)
                    ]
                )
            ])

    def test_chained_subq_subclass(self):
        s = Session()
        # of_type() narrows employees to Engineer so the chain can
        # continue onto Engineer.machines
        q = s.query(Company).options(
            subqueryload(Company.employees.of_type(Engineer)).
            subqueryload(Engineer.machines).
            subqueryload(Machine.type)
        )

        def go():
            eq_(
                q.all(),
                [self._fixture()]
            )
        # base query + one subquery per level in the chain
        self.assert_sql_count(testing.db, go, 4)
class SelfReferentialTest(fixtures.MappedTest):
    """Subquery eager loading on a self-referential (adjacency list)
    relationship, with varying join_depth settings."""

    @classmethod
    def define_tables(cls, metadata):
        Table('nodes', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('parent_id', Integer, ForeignKey('nodes.id')),
            Column('data', String(30)))

    def test_basic(self):
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            # join_depth=3 allows eager loading three levels deep
            'children':relationship(Node,
                                        lazy='subquery',
                                        join_depth=3, order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        n2 = Node(data='n2')
        n2.append(Node(data='n21'))
        n2.children[0].append(Node(data='n211'))
        n2.children[0].append(Node(data='n212'))

        sess.add(n1)
        sess.add(n2)
        sess.flush()
        sess.expunge_all()

        def go():
            d = sess.query(Node).filter(Node.data.in_(['n1', 'n2'])).\
                    order_by(Node.data).all()
            eq_([Node(data='n1', children=[
                    Node(data='n11'),
                    Node(data='n12', children=[
                        Node(data='n121'),
                        Node(data='n122'),
                        Node(data='n123')
                    ]),
                    Node(data='n13')
                ]),
                Node(data='n2', children=[
                    Node(data='n21', children=[
                        Node(data='n211'),
                        Node(data='n212'),
                    ])
                ])
            ], d)
        self.assert_sql_count(testing.db, go, 4)

    def test_lazy_fallback_doesnt_affect_eager(self):
        # beyond join_depth=1 the load falls back to lazy loading for
        # n12's children, but the first level stays eagerly loaded
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children':relationship(Node, lazy='subquery', join_depth=1,
                                    order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            allnodes = sess.query(Node).order_by(Node.data).all()

            n12 = allnodes[2]
            eq_(n12.data, 'n12')
            eq_([
                Node(data='n121'),
                Node(data='n122'),
                Node(data='n123')
            ], list(n12.children))
        self.assert_sql_count(testing.db, go, 4)

    def test_with_deferred(self):
        # deferred 'data' column interacting with the self-referential
        # subquery load; undefer() options reduce the statement count
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children':relationship(Node, lazy='subquery', join_depth=3,
                                    order_by=nodes.c.id),
            'data':deferred(nodes.c.data)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            eq_(
                Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).order_by(Node.id).first(),
            )
        self.assert_sql_count(testing.db, go, 6)

        sess.expunge_all()

        def go():
            eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).options(undefer('data')).order_by(Node.id).first())
        self.assert_sql_count(testing.db, go, 5)

        sess.expunge_all()

        def go():
            eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).options(undefer('data'),
                                        undefer('children.data')).first())
        self.assert_sql_count(testing.db, go, 3)

    def test_options(self):
        # eager depth controlled per-query via subqueryload_all()
        # instead of join_depth on the mapper
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children':relationship(Node, order_by=nodes.c.id)
        }, order_by=nodes.c.id)
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            d = sess.query(Node).filter_by(data='n1').\
                    options(subqueryload_all('children.children')).first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 3)

    def test_no_depth(self):
        """no join depth is set, so no eager loading occurs."""

        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children':relationship(Node, lazy='subquery')
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        n2 = Node(data='n2')
        n2.append(Node(data='n21'))
        sess.add(n1)
        sess.add(n2)
        sess.flush()
        sess.expunge_all()

        def go():
            d = sess.query(Node).filter(Node.data.in_(['n1', 'n2'])).order_by(Node.data).all()
            eq_([
                Node(data='n1', children=[
                    Node(data='n11'),
                    Node(data='n12', children=[
                        Node(data='n121'),
                        Node(data='n122'),
                        Node(data='n123')
                    ]),
                    Node(data='n13')
                ]),
                Node(data='n2', children=[
                    Node(data='n21')
                ])
            ], d)
        self.assert_sql_count(testing.db, go, 4)
class InheritanceToRelatedTest(fixtures.MappedTest):
    """A relationship defined on a polymorphic base: the eager-load
    query should be cached/issued once for the base, not per subclass."""

    @classmethod
    def define_tables(cls, metadata):
        Table('foo', metadata,
            Column("id", Integer, primary_key=True),
            Column("type", String(50)),
            Column("related_id", Integer, ForeignKey("related.id"))
        )
        Table("bar", metadata,
            Column("id", Integer, ForeignKey('foo.id'), primary_key=True),
        )
        Table("baz", metadata,
            Column("id", Integer, ForeignKey('foo.id'), primary_key=True),
        )
        Table("related", metadata,
            Column("id", Integer, primary_key=True),
        )

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Comparable):
            pass
        class Bar(Foo):
            pass

        class Baz(Foo):
            pass

        class Related(cls.Comparable):
            pass

    @classmethod
    def fixtures(cls):
        # foo ids 1,2 are 'bar' rows, 3,4 are 'baz' rows; each pair
        # points at related rows 1 and 2
        return dict(
            foo=[
                ('id', 'type', 'related_id'),
                (1, 'bar', 1),
                (2, 'bar', 2),
                (3, 'baz', 1),
                (4, 'baz', 2),
            ],
            bar=[
                ('id', ),
                (1,),
                (2,)
            ],
            baz=[
                ('id', ),
                (3,),
                (4,)
            ],
            related=[
                ('id', ),
                (1,),
                (2,)
            ]
        )

    @classmethod
    def setup_mappers(cls):
        # 'related' lives on the polymorphic base Foo
        mapper(cls.classes.Foo, cls.tables.foo, properties={
            'related': relationship(cls.classes.Related)
        }, polymorphic_on=cls.tables.foo.c.type)
        mapper(cls.classes.Bar, cls.tables.bar, polymorphic_identity='bar',
                    inherits=cls.classes.Foo)
        mapper(cls.classes.Baz, cls.tables.baz, polymorphic_identity='baz',
                    inherits=cls.classes.Foo)
        mapper(cls.classes.Related, cls.tables.related)

    def test_caches_query_per_base_subq(self):
        # subquery load: base query + exactly one subquery SELECT,
        # shared across the Bar and Baz subclasses
        Foo, Bar, Baz, Related = self.classes.Foo, self.classes.Bar, \
                        self.classes.Baz, self.classes.Related
        s = Session(testing.db)
        def go():
            eq_(
                s.query(Foo).with_polymorphic([Bar, Baz]).\
                        order_by(Foo.id).\
                        options(subqueryload(Foo.related)).all(),
                [
                    Bar(id=1, related=Related(id=1)),
                    Bar(id=2, related=Related(id=2)),
                    Baz(id=3, related=Related(id=1)),
                    Baz(id=4, related=Related(id=2))
                ]
            )
        self.assert_sql_count(testing.db, go, 2)

    def test_caches_query_per_base_joined(self):
        # technically this should be in test_eager_relations
        Foo, Bar, Baz, Related = self.classes.Foo, self.classes.Bar, \
                        self.classes.Baz, self.classes.Related
        s = Session(testing.db)
        def go():
            eq_(
                s.query(Foo).with_polymorphic([Bar, Baz]).\
                        order_by(Foo.id).\
                        options(joinedload(Foo.related)).all(),
                [
                    Bar(id=1, related=Related(id=1)),
                    Bar(id=2, related=Related(id=2)),
                    Baz(id=3, related=Related(id=1)),
                    Baz(id=4, related=Related(id=2))
                ]
            )
        # joinedload folds everything into a single SELECT
        self.assert_sql_count(testing.db, go, 1)
class CyclicalInheritingEagerTestOne(fixtures.MappedTest):
    """Mutually-referencing subquery eager loads between two
    single-table-inheritance hierarchies must not loop forever."""

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
            Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('c2', String(30)),
            Column('type', String(30))
            )

        Table('t2', metadata,
            Column('c1', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('c2', String(30)),
            Column('type', String(30)),
            # NOTE(review): column is literally named 't1.id' — the FK to
            # t1.c1 is what the test relies on; confirm the odd name is
            # intentional
            Column('t1.id', Integer, ForeignKey('t1.c1')))

    def test_basic(self):
        t2, t1 = self.tables.t2, self.tables.t1

        class T(object):
            pass

        class SubT(T):
            pass

        class T2(object):
            pass

        class SubT2(T2):
            pass

        # single-table inheritance on both sides, with a cyclical pair
        # of subquery-eager relationships between the subclasses
        mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1')
        mapper(SubT, None, inherits=T, polymorphic_identity='subt1', properties={
            't2s': relationship(SubT2, lazy='subquery',
                backref=sa.orm.backref('subt', lazy='subquery'))
        })
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
        mapper(SubT2, None, inherits=T2, polymorphic_identity='subt2')

        # testing a particular endless loop condition in eager load setup
        create_session().query(SubT).all()
class CyclicalInheritingEagerTestTwo(fixtures.DeclarativeMappedTest,
                        testing.AssertsCompiledSQL):
    """subqueryload('*') against a declarative joined-inheritance
    hierarchy where subclasses relate to each other."""

    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = 'persistent'
            id = Column(Integer, primary_key=True, test_needs_autoincrement=True)

        class Movie(PersistentObject):
            __tablename__ = 'movie'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            director_id = Column(Integer, ForeignKey('director.id'))
            title = Column(String(50))

        class Director(PersistentObject):
            __tablename__ = 'director'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            # foreign_keys needed since both classes share 'persistent'
            movies = relationship("Movie", foreign_keys=Movie.director_id)
            name = Column(String(50))

    def test_from_subclass(self):
        # pull the generated subquery-load Query out of the compile
        # context and check the SQL it would emit
        Director = self.classes.Director

        s = create_session()

        ctx = s.query(Director).options(subqueryload('*'))._compile_context()

        q = ctx.attributes[('subquery',
                        (inspect(Director), inspect(Director).attrs.movies))]
        self.assert_compile(q,
            "SELECT movie.id AS movie_id, persistent.id AS persistent_id, "
            "movie.director_id AS movie_director_id, "
            "movie.title AS movie_title, "
            "anon_1.director_id AS anon_1_director_id "
            "FROM (SELECT director.id AS director_id "
                "FROM persistent JOIN director "
                "ON persistent.id = director.id) AS anon_1 "
            "JOIN (persistent JOIN movie ON persistent.id = movie.id) "
            "ON anon_1.director_id = movie.director_id "
            "ORDER BY anon_1.director_id",
            dialect="default"
        )

    def test_integrate(self):
        Director = self.classes.Director
        Movie = self.classes.Movie

        session = Session(testing.db)
        rscott = Director(name="Ridley Scott")
        alien = Movie(title="Alien")
        brunner = Movie(title="Blade Runner")
        rscott.movies.append(brunner)
        rscott.movies.append(alien)
        session.add_all([rscott, alien, brunner])
        session.commit()

        # NOTE(review): close_all() called on the instance — confirm
        # this is the intended API use here
        session.close_all()
        d = session.query(Director).options(subqueryload('*')).first()
        # director + two movies all loaded into the session
        assert len(list(session)) == 3
class SubqueryloadDistinctTest(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
__dialect__ = 'default'
run_inserts = 'once'
run_deletes = None
    @classmethod
    def setup_classes(cls):
        # Declarative model: Director 1-n DirectorPhoto, Director 1-n
        # Movie, Movie 1-n Credit — used to test DISTINCT behavior of
        # m2o subquery loads.
        Base = cls.DeclarativeBasic

        class Director(Base):
            __tablename__ = 'director'
            id = Column(Integer, primary_key=True,
                            test_needs_autoincrement=True)
            name = Column(String(50))

        class DirectorPhoto(Base):
            __tablename__ = 'director_photo'
            id = Column(Integer, primary_key=True,
                            test_needs_autoincrement=True)
            path = Column(String(255))
            director_id = Column(Integer, ForeignKey('director.id'))
            director = relationship(Director, backref="photos")

        class Movie(Base):
            __tablename__ = 'movie'
            id = Column(Integer, primary_key=True, test_needs_autoincrement=True)
            director_id = Column(Integer, ForeignKey('director.id'))
            director = relationship(Director, backref="movies")
            title = Column(String(50))
            credits = relationship("Credit", backref="movie")

        class Credit(Base):
            __tablename__ = 'credit'
            id = Column(Integer, primary_key=True, test_needs_autoincrement=True)
            movie_id = Column(Integer, ForeignKey('movie.id'))
    @classmethod
    def insert_data(cls):
        # one director with two photos and two movies (three credits
        # total) — two movies sharing a director is what makes the
        # DISTINCT in the m2o subquery load observable
        Movie = cls.classes.Movie
        Director = cls.classes.Director
        DirectorPhoto = cls.classes.DirectorPhoto
        Credit = cls.classes.Credit

        d = Director(name='Woody Allen')
        d.photos = [DirectorPhoto(path='/1.jpg'),
                    DirectorPhoto(path='/2.jpg')]
        d.movies = [Movie(title='Manhattan', credits=[Credit(), Credit()]),
                    Movie(title='Sweet and Lowdown', credits=[Credit()])]
        sess = create_session()
        sess.add_all([d])
        sess.flush()
def test_distinct_strategy_opt_m2o(self):
self._run_test_m2o(True, None)
self._run_test_m2o(False, None)
def test_distinct_unrelated_opt_m2o(self):
self._run_test_m2o(None, True)
self._run_test_m2o(None, False)
def _run_test_m2o(self,
director_strategy_level,
photo_strategy_level):
# test where the innermost is m2o, e.g.
# Movie->director
Movie = self.classes.Movie
Director = self.classes.Director
Movie.director.property.distinct_target_key = director_strategy_level
Director.photos.property.distinct_target_key = photo_strategy_level
# the DISTINCT is controlled by
# only the Movie->director relationship, *not* the
# Director.photos
expect_distinct = director_strategy_level in (True, None)
s = create_session()
q = (
s.query(Movie)
.options(
subqueryload(Movie.director)
.subqueryload(Director.photos)
)
)
ctx = q._compile_context()
q2 = ctx.attributes[
('subquery', (inspect(Movie), inspect(Movie).attrs.director))
]
self.assert_compile(
q2,
'SELECT director.id AS director_id, '
'director.name AS director_name, '
'anon_1.movie_director_id AS anon_1_movie_director_id '
'FROM (SELECT%s movie.director_id AS movie_director_id '
'FROM movie) AS anon_1 '
'JOIN director ON director.id = anon_1.movie_director_id '
'ORDER BY anon_1.movie_director_id' % (
" DISTINCT" if expect_distinct else "")
)
ctx2 = q2._compile_context()
result = s.execute(q2)
rows = result.fetchall()
if expect_distinct:
eq_(rows, [
(1, 'Woody Allen', 1),
])
else:
eq_(rows, [
(1, 'Woody Allen', 1), (1, 'Woody Allen', 1),
])
q3 = ctx2.attributes[
('subquery', (inspect(Director), inspect(Director).attrs.photos))
]
self.assert_compile(
q3,
'SELECT director_photo.id AS director_photo_id, '
'director_photo.path AS director_photo_path, '
'director_photo.director_id AS director_photo_director_id, '
'director_1.id AS director_1_id '
'FROM (SELECT%s movie.director_id AS movie_director_id '
'FROM movie) AS anon_1 '
'JOIN director AS director_1 ON director_1.id = anon_1.movie_director_id '
'JOIN director_photo ON director_1.id = director_photo.director_id '
'ORDER BY director_1.id' % (
" DISTINCT" if expect_distinct else "")
)
result = s.execute(q3)
rows = result.fetchall()
if expect_distinct:
eq_(set(tuple(t) for t in rows), set([
(1, '/1.jpg', 1, 1),
(2, '/2.jpg', 1, 1),
]))
else:
# oracle might not order the way we expect here
eq_(set(tuple(t) for t in rows), set([
(1, '/1.jpg', 1, 1),
(2, '/2.jpg', 1, 1),
(1, '/1.jpg', 1, 1),
(2, '/2.jpg', 1, 1),
]))
movies = q.all()
# check number of persistent objects in session
eq_(len(list(s)), 5)
def test_cant_do_distinct_in_joins(self):
"""the DISTINCT feature here works when the m2o is in the innermost
mapper, but when we are just joining along relationships outside
of that, we can still have dupes, and there's no solution to that.
"""
Movie = self.classes.Movie
Credit = self.classes.Credit
s = create_session()
q = (
s.query(Credit)
.options(
subqueryload(Credit.movie)
.subqueryload(Movie.director)
)
)
ctx = q._compile_context()
q2 = ctx.attributes[
('subquery', (inspect(Credit), Credit.movie.property))
]
ctx2 = q2._compile_context()
q3 = ctx2.attributes[
('subquery', (inspect(Movie), Movie.director.property))
]
result = s.execute(q3)
eq_(
result.fetchall(),
[
(1, 'Woody Allen', 1), (1, 'Woody Allen', 1),
]
)
class JoinedNoLoadConflictTest(fixtures.DeclarativeMappedTest):
    """test for [ticket:2887]

    Parent.children is lazy='noload' while Child.parent is lazy='joined';
    a subqueryload of Parent.children must still populate the collection
    even though the joined-eager Child.parent loop points back through a
    noload relationship.
    """
    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic
        class Parent(ComparableEntity, Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True, test_needs_autoincrement=True)
            name = Column(String(20))
            children = relationship('Child',
                back_populates='parent',
                lazy='noload'
            )
        class Child(ComparableEntity, Base):
            __tablename__ = 'child'
            id = Column(Integer, primary_key=True, test_needs_autoincrement=True)
            name = Column(String(20))
            parent_id = Column(Integer, ForeignKey('parent.id'))
            parent = relationship('Parent', back_populates='children', lazy='joined')
    @classmethod
    def insert_data(cls):
        # minimal fixture: one parent with a single child
        Parent = cls.classes.Parent
        Child = cls.classes.Child
        s = Session()
        s.add(Parent(name='parent', children=[Child(name='c1')]))
        s.commit()
    def test_subqueryload_on_joined_noload(self):
        Parent = self.classes.Parent
        Child = self.classes.Child
        s = Session()
        # here we have Parent->subqueryload->Child->joinedload->parent->noload->children.
        # the actual subqueryload has to emit *after* we've started populating
        # Parent->subqueryload->child.
        parent = s.query(Parent).options([subqueryload('children')]).first()
        eq_(
            parent.children,
            [Child(name='c1')]
        )
| mit |
adamjmcgrath/glancydesign | django/conf/locale/en_GB/formats.py | 234 | 2048 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# NOTE(review): DATE_FORMAT/DATETIME_FORMAT below render month-first,
# US-style output ('Oct. 25, 2006') while the SHORT_* display formats and
# the *_INPUT_FORMATS are day-first (25/10/2006) -- confirm this mix is
# intended for the en_GB locale.
DATE_FORMAT = 'N j, Y' # 'Oct. 25, 2006'
TIME_FORMAT = 'P' # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P' # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'F j' # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm'
# NOTE(review): UK calendars conventionally start the week on Monday (1) --
# confirm Sunday (0) is intended here.
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%Y-%m-%d', # '2006-10-25'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M', # '25/10/06 14:30'
    '%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
maccaspacca/BismuthToolsWeb | log.py | 3 | 1767 | import logging, sys
from logging.handlers import RotatingFileHandler
def filter_status(record):
    """Logging filter: keep only status messages and errors.

    Returns 1 (keep) when the record's message contains "Status:" or the
    record is at ERROR level, else 0 (drop).  Attached to the console
    handler so non-status chatter stays out of the terminal.
    """
    # str() guards against non-string msg objects (e.g. exceptions).
    if ("Status:" in str(record.msg)) or (record.levelname == 'ERROR'):
        return 1
    return 0
def log(logFile, level_input="WARNING", terminal_output=False):
    """Configure the shared 'root' logger and return it.

    Attaches a rotating file handler (5 MB x 2 backups) plus a console
    handler on stdout.

    :param logFile: path of the rotating log file.
    :param level_input: level name ("NOTSET".."CRITICAL"); unknown names
        now fall back to WARNING (the old if-chain left ``level`` unbound
        and raised NameError for any unrecognized value).
    :param terminal_output: when True the console shows every record with
        function/line info; otherwise only "Status:" messages and ERRORs
        are shown, with a terser format (see filter_status).
    """
    # Explicit name -> level mapping replaces the non-exclusive if-chain.
    levels = {
        "NOTSET": logging.NOTSET,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    level = levels.get(level_input, logging.WARNING)
    log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')
    my_handler = RotatingFileHandler(logFile, mode='a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None, delay=0)
    my_handler.setFormatter(log_formatter)
    my_handler.setLevel(level)
    app_log = logging.getLogger('root')
    app_log.setLevel(level)
    app_log.addHandler(my_handler)
    # This part is what goes on console.
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(level)
    # TODO: We could have 2 level in the config, one for screen and one for files.
    print ("Logging level: {} ({})".format(level_input, level))
    if terminal_output != True:
        # Quiet console: only status lines and errors, no func/line info.
        ch.addFilter(filter_status)
        formatter = logging.Formatter('%(asctime)s %(message)s')
    else:
        formatter = logging.Formatter('%(asctime)s %(funcName)s(%(lineno)d) %(message)s')
    ch.setFormatter(formatter)
    app_log.addHandler(ch)
    return app_log
| gpl-3.0 |
foobarbazblarg/stayclean | stayclean-2019-march/display.py | 26 | 24079 | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# Month-specific knob: set this to the number of days in the current month
# each month; everything below is derived from today's date.
currentMonthTotalDays = 31
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
# % 12 + 1 wraps December (12) around to January (1).
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
# currentDayOfMonthIndex = 31
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# Challenge roster, loaded from the participant data file by
# ParticipantCollection; the counts feed the *_STILL_IN template tokens.
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
    """Legacy renderer: header sentence plus the remaining-participant list.

    Substitutes the *_STILL_IN / INITIAL_NUMBER tokens into the header,
    then appends one "/u/name" entry per still-in participant, marking
    anyone who has never checked in with a trailing " ~".
    """
    header = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    for token, value in (('NUMBER_STILL_IN', numberStillIn),
                         ('INITIAL_NUMBER', initialNumber),
                         ('PERCENT_STILL_IN', percentStillIn)):
        header = re.sub(token, str(value), header)
    entries = []
    for member in participants.participantsWhoAreStillIn():
        marker = "" if member.hasCheckedIn else " ~"
        entries.append("/u/" + member.name + marker + "\n\n")
    return header + "".join(entries)
def templateForParticipants():
    """Render one "/u/name" entry per still-in participant, each followed
    by a blank line; a trailing " ~" marks anyone who never checked in."""
    chunks = []
    for member in participants.participantsWhoAreStillIn():
        marker = "" if member.hasCheckedIn else " ~"
        chunks.append("/u/%s%s\n\n" % (member.name, marker))
    return "".join(chunks)
def templateForParticipantsOnFinalDay():
    """Final-day roster: the checked-in participants first, then the
    still-in-but-never-checked-in group (marked " ~"), who will be
    dropped unless they check in today."""
    pieces = [
        "These participants have checked in at least once in the last 15 days:\n",
        "\n",
    ]
    for member in participants.participantsWhoAreStillInAndHaveCheckedIn():
        pieces.append("/u/" + member.name + "\n")
    pieces.append("\n")
    pieces.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n")
    pieces.append("\n")
    for member in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
        pieces.append("/u/" + member.name + " ~\n")
    pieces.append("\n")
    return "".join(pieces)
def templateFor1():
    """Day 1: challenge kickoff post; opens the 3-day late-signup window."""
    print '1\n\n'
    answer = ""
    print "============================================================="
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 3 days. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
    answer += "\n"
    answer += "Here's how this thing works:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
    answer += templateForParticipants()
    print "============================================================="
    return answer
def templateFor2():
    """Day 2: second day of the 3-day late-signup grace period."""
    print '2\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the second day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor3():
    """Day 3: last day of the late-signup grace period."""
    print '3\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the last day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor4():
    """Day 4: grace period over; signups closed."""
    print '4\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Our 3 day late-signup grace period is now over. If you forgot to sign up, it's too late for CURRENT_MONTH_NAME, but feel free to leave comments here anyway, and we'll see you in NEXT_MONTH_NAME.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor5to9():
    """Days 5-9: standard mid-challenge update."""
    print '5 to 9\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor10to14():
    """Days 10-14: adds the check-in countdown before the day-15 purge."""
    print '10 to 14\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15 - currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor15():
    """Day 15: final check-in deadline before unchecked names are purged."""
    print '15\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor16toPenultimate():
    """Days 16..second-to-last: post-purge update; "~" now tracks check-ins
    since the 15th."""
    print '16 to penultimate\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I will re-add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateForUltimate():
    """Last day of the month: wrap-up post using the final-day roster."""
    print 'Ultimate\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    # TODO: need to do the part where it lists the checked in and non-checked in participants separately.
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
    answer += templateForParticipantsOnFinalDay()
    return answer
def templateToUse():
    """Select the daily template for the current day of the month.

    Days 1-4 get individual signup-grace-period templates, 5-9 and 10-14
    share range templates, day 15 is the check-in deadline, 16 through
    the penultimate day share a post-purge template, and the last day
    gets the wrap-up post.
    """
    if currentDayOfMonthIndex == 1:
        return templateFor1()
    elif currentDayOfMonthIndex == 2:
        return templateFor2()
    elif currentDayOfMonthIndex == 3:
        return templateFor3()
    elif currentDayOfMonthIndex == 4:
        return templateFor4()
    elif 5 <= currentDayOfMonthIndex <= 9:
        return templateFor5to9()
    elif 10 <= currentDayOfMonthIndex <= 14:
        return templateFor10to14()
    # was a bare `if` that silently restarted the chain; behaviorally the
    # same (every earlier branch returns) but an elif makes intent clear
    elif currentDayOfMonthIndex == 15:
        return templateFor15()
    elif 16 <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
        return templateFor16toPenultimate()
    else:
        return templateForUltimate()
def stringToPrint():
    """Render today's template and substitute every placeholder token.

    Uses plain str.replace rather than re.sub: the tokens are literal
    strings, and re.sub would interpret regex metacharacters in the
    pattern and backslash escapes in the replacement text.
    """
    answer = templateToUse()
    # Order matters only if one token were a substring of another; none is.
    substitutions = (
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
    )
    for token, value in substitutions:
        answer = answer.replace(token, value)
    return answer
# Script entry: render today's update post, echo it between rulers for a
# visual check, and copy it to the clipboard for pasting into reddit.
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| mit |
Ircam-Web/mezzanine-organization | organization/projects/migrations/0055_auto_20170323_1039.py | 1 | 2059 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-03-23 09:39
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9.11). Replaces ProjectPublicData.period with
    # explicit implementation_duration / implementation_start_date fields and
    # relabels several date_from fields as "start date".
    # NOTE: the one-off default for implementation_start_date is the
    # generation timestamp (23 Mar 2017); preserve_default=False means it is
    # only used to backfill existing rows during this migration.
    dependencies = [
        ('organization-projects', '0054_auto_20170323_0815'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='projectpublicdata',
            name='period',
        ),
        migrations.AddField(
            model_name='projectpublicdata',
            name='implementation_duration',
            field=models.CharField(default='1', help_text='Possible duration of implementation in months (must be part of the project implementation workplan) (months)', max_length=128, verbose_name='implementation duration'),
        ),
        migrations.AddField(
            model_name='projectpublicdata',
            name='implementation_start_date',
            field=models.DateField(default=datetime.datetime(2017, 3, 23, 9, 39, 34, 20133, tzinfo=utc), help_text='Possible start date of implementation (must be part of the project implementation workplan) (DD/MM/YYYY)', verbose_name='implementation start date'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='project',
            name='date_from',
            field=models.DateField(blank=True, null=True, verbose_name='start date'),
        ),
        migrations.AlterField(
            model_name='projectcall',
            name='date_from',
            field=models.DateField(blank=True, null=True, verbose_name='start date'),
        ),
        migrations.AlterField(
            model_name='projectresidency',
            name='date_from',
            field=models.DateField(blank=True, null=True, verbose_name='start date'),
        ),
        migrations.AlterField(
            model_name='projectworkpackage',
            name='date_from',
            field=models.DateField(blank=True, null=True, verbose_name='start date'),
        ),
    ]
| agpl-3.0 |
YongseopKim/crosswalk-test-suite | webapi/tct-widgetpolicy-w3c-tests/inst.xpk.py | 357 | 6759 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output line by line.

    stderr is merged into stdout so everything the command prints is
    captured.  Returns a tuple (return_code, output_lines).
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Stop once the process has exited (poll() != None) and its pipe
        # is drained (empty readline).
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Return *cmd*, wrapping package-manager invocations for the device.

    Commands containing "pkgcmd" must run as the configured user with the
    session D-Bus environment (XW_ENV) exported, so they are rewritten as
    ``su - <user> -c '<env>;<cmd>'``.  Any other command passes through
    untouched.
    """
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Query the device for the numeric uid of the configured user.

    Uses ``sdb shell`` in SDB mode and plain ``ssh`` otherwise, and
    returns doCMD's (return_code, output_lines) tuple.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Look up the installed package id for *pkg_name* on the device.

    Lists installed packages with ``pkgcmd -l`` (via sdb or ssh) and scans
    the output for the line that names the package.  Returns the pkgid
    string, or None when the listing fails or the package is absent.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    needle = "[" + pkg_name + "]"
    for line in output:
        if needle not in line:
            continue
        # The listing prints "... pkgid [<id>] ...": take the token right
        # after the "pkgid" label and strip its brackets.
        fields = line.split()
        return fields[fields.index("pkgid") + 1].strip("[]")
    return None
def doRemoteCMD(cmd=None):
    """Execute *cmd* on the target device through sdb shell or ssh."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device, then sync the filesystem.

    Uses ``sdb push`` in SDB mode, ``scp -r`` otherwise.

    Returns True when the copy command exited with status 0, False on
    failure.  Callers (instPKGs) treat a False return as a failed install
    step; the previous implementation had this flag inverted, reporting
    success for a failed copy and failure for a successful one.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")
    # Zero exit status means the copy succeeded.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .xpk package of this suite from the device.

    Walks SCRIPT_DIR for .xpk files, resolves each one's installed pkgid
    and removes it via pkgcmd, then deletes the on-device package
    directory.  Returns True only when every step succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media assets are not packages; skip them.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t xpk -q -n %s" % pkg_id)
                # A "Failure" line in pkgcmd's output marks the step failed.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Remove the suite's working directory on the device.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Install every .xpk package of this suite onto the device.

    Creates the on-device package directory, pushes each .xpk found under
    SCRIPT_DIR, installs it via pkgcmd and deletes the pushed file again.
    Returns True only when every step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media assets are not packages; skip them.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # A "Failure" line in pkgcmd's output marks the step failed.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return action_status
def main():
    """Parse command-line options, resolve the target device and user
    environment, then install (-i, default) or uninstall (-u) the suite.

    Exits with status 1 on option errors, missing device, uid lookup
    failure, conflicting flags, or a failed install/uninstall.
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-detect the first attached sdb device when none was given.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Build the session D-Bus environment string from the user's uid; the
    # pkgcmd wrapper (updateCMD) needs it.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    # Script entry point: run the installer/uninstaller, then exit cleanly.
    main()
    sys.exit(0)
| bsd-3-clause |
StyXman/ayrton | ayrton/parser/pyparser/pylexer.py | 1 | 8358 | # Used by genpytokenize.py to generate the parser in pytokenize.py
from ayrton.parser.pyparser.automata import DFA, DEFAULT
class EMPTY: pass
def newArcPair (states, transitionLabel):
    """Append a fresh two-state NFA fragment to *states*.

    The first new state carries a single arc labelled *transitionLabel*
    into the second, arc-less, new state.  Returns the pair of new state
    indices (start, finish).
    """
    start = len(states)
    finish = start + 1
    states.append([(transitionLabel, finish)])
    states.append([])
    return start, finish
# ______________________________________________________________________
def chain (states, *stateIndexPairs):
    """Concatenate NFA fragments.

    Epsilon-links each fragment's finish state to the next fragment's
    start state and returns the (start, finish) pair of the whole chain.
    A single fragment is returned unchanged.
    """
    if len(stateIndexPairs) <= 1:
        return stateIndexPairs[0]
    start, tail = stateIndexPairs[0]
    for nextStart, nextFinish in stateIndexPairs[1:]:
        states[tail].append((EMPTY, nextStart))
        tail = nextFinish
    return start, tail
# ______________________________________________________________________
def chainStr (states, s):
    """Build a chained NFA fragment matching the literal string *s*."""
    pairs = [newArcPair(states, ch) for ch in s]
    return chain(states, *pairs)
# ______________________________________________________________________
def notChainStr (states, s):
    """XXX I'm not sure this is how it should be done, but I'm going to
    try it anyway. Note that for this case, I require only single character
    arcs, since I would have to basically invert all accepting states and
    non-accepting states of any sub-NFA's.

    Builds one arc pair per character of *s* and epsilon-links each
    partial match to a shared finish state, so the fragment finishes on
    any proper prefix of *s*.
    """
    assert len(s) > 0
    arcs = list(map(lambda x : newArcPair(states, x), s))
    # Shared finish state for every "failed to match all of s" exit.
    finish = len(states)
    states.append([])
    start, lastFinish = arcs[0]
    states[start].append((EMPTY, finish))
    for crntStart, crntFinish in arcs[1:]:
        states[lastFinish].append((EMPTY, crntStart))
        states[crntStart].append((EMPTY, finish))
    return start, finish
# ______________________________________________________________________
def group (states, *stateIndexPairs):
    """Combine NFA fragments as alternatives (regex ``|``).

    Adds a shared start state with an epsilon arc into every fragment and
    a shared finish state reached by an epsilon arc from every fragment's
    finish.  A single fragment is returned unchanged.
    """
    if len(stateIndexPairs) <= 1:
        return stateIndexPairs[0]
    start = len(states)
    finish = start + 1
    fanOut = []
    states.append(fanOut)
    states.append([])
    for branchStart, branchFinish in stateIndexPairs:
        fanOut.append((EMPTY, branchStart))
        states[branchFinish].append((EMPTY, finish))
    return start, finish
# ______________________________________________________________________
def groupStr (states, s):
    """Build an NFA fragment matching any single character of *s*."""
    return group(states, *[newArcPair(states, ch) for ch in s])
# ______________________________________________________________________
def notGroup (states, *stateIndexPairs):
    """Like group, but will add a DEFAULT transition to a new end state,
    causing anything in the group to not match by going to a dead state.
    XXX I think this is right...
    """
    start, dead = group(states, *stateIndexPairs)
    finish = len(states)
    states.append([])
    # Inputs not matched inside the group take the DEFAULT arc to the
    # real finish; matched inputs end in the (non-exit) dead state.
    states[start].append((DEFAULT, finish))
    return start, finish
# ______________________________________________________________________
def notGroupStr (states, s):
    # Negated character class: accept any single input NOT in *s*.
    return notGroup(states, *map(lambda x : newArcPair(states, x), s))
# ______________________________________________________________________
def any (states, *stateIndexPairs):
    # Zero-or-more repetition (regex '*'): back-link the group's finish
    # to its start and use the start state as both entry and exit.
    # NOTE: shadows the builtin ``any`` within this module; renaming
    # would change the module's public name, so it is kept as-is.
    start, finish = group(states, *stateIndexPairs)
    states[finish].append((EMPTY, start))
    return start, start
# ______________________________________________________________________
def maybe (states, *stateIndexPairs):
    # Optional occurrence (regex '?'): an epsilon arc from the group's
    # start straight to its finish lets the fragment be skipped entirely.
    start, finish = group(states, *stateIndexPairs)
    states[start].append((EMPTY, finish))
    return start, finish
# ______________________________________________________________________
def atleastonce (states, *stateIndexPairs):
    # One-or-more repetition (regex '+'): a back arc from finish to start
    # allows repeats, but the exit is still the finish state, so at least
    # one traversal of the group is required.
    start, finish = group(states, *stateIndexPairs)
    states[finish].append((EMPTY, start))
    return start, finish
# ______________________________________________________________________
def closure (states, start, result = 0):
    """Return a bitmask of every state reachable from *start* via
    epsilon (EMPTY) arcs, including *start* itself.

    *result* accumulates already-visited states across recursive calls;
    None is treated the same as an empty mask.
    """
    if result is None:
        result = 0
    mask = 1 << start
    if result & mask:
        # Already visited: cycle guard.
        return result
    result |= mask
    for label, arrow in states[start]:
        if label == EMPTY:
            result |= closure(states, arrow, result)
    return result
# ______________________________________________________________________
def nfaToDfa (states, start, finish):
    """Subset-construct a DFA from the NFA in *states*.

    Each temp-DFA state is [nfaClosure : bitmask, [tempArc], accept],
    with tempArc := [label, arrow, nfaClosure].  Returns the
    (states, accepts) pair produced by finalizeTempDfa.
    """
    tempStates = []
    startClosure = closure(states, start)
    # The start state accepts iff the NFA finish state is in its closure.
    crntTempState = [startClosure, [], 0 != (startClosure & (1 << finish))]
    tempStates.append(crntTempState)
    index = 0
    # Worklist loop: tempStates grows as new subsets are discovered.
    while index < len(tempStates):
        crntTempState = tempStates[index]
        crntClosure, crntArcs, crntAccept = crntTempState
        # Collect, per non-EMPTY label, the union of target closures over
        # all NFA states contained in this subset.
        for index2 in range(0, len(states)):
            if 0 != (crntClosure & (1 << index2)):
                for label, nfaArrow in states[index2]:
                    if label == EMPTY:
                        continue
                    foundTempArc = False
                    for tempArc in crntArcs:
                        if tempArc[0] == label:
                            foundTempArc = True
                            break
                    if not foundTempArc:
                        tempArc = [label, -1, 0]
                        crntArcs.append(tempArc)
                    tempArc[2] = closure(states, nfaArrow, tempArc[2])
        # Resolve every arc's target subset to a temp-state index,
        # creating new temp states for unseen subsets.
        for arcIndex in range(0, len(crntArcs)):
            label, arrow, targetStates = crntArcs[arcIndex]
            targetFound = False
            arrow = 0
            for destTempState in tempStates:
                if destTempState[0] == targetStates:
                    targetFound = True
                    break
                arrow += 1
            if not targetFound:
                assert arrow == len(tempStates)
                newState = [targetStates, [], 0 != (targetStates &
                                                    (1 << finish))]
                tempStates.append(newState)
            crntArcs[arcIndex][1] = arrow
        index += 1
    # Merge equivalent states, then strip the bookkeeping representation.
    tempStates = simplifyTempDfa(tempStates)
    states = finalizeTempDfa(tempStates)
    return states
# ______________________________________________________________________
def sameState (s1, s2):
    """Return True when two temp-DFA states are equivalent.

    state := [ nfaclosure : Long, [ arc ], accept : Boolean ]
    arc   := [ label, arrow : Int, nfaClosure : Long ]

    States are equivalent when their accept flags agree and their arc
    lists agree pairwise on (label, arrow); each arc's trailing
    nfaClosure is deliberately ignored, as is the state's own closure.
    """
    if s1[2] != s2[2] or len(s1[1]) != len(s2[1]):
        return False
    for arc1, arc2 in zip(s1[1], s2[1]):
        if arc1[:-1] != arc2[:-1]:
            return False
    return True
# ______________________________________________________________________
def simplifyTempDfa (tempStates):
    """simplifyTempDfa (tempStates)

    Merge duplicate temp-DFA states in place: whenever two states compare
    equal under sameState(), the higher-numbered one is marked deleted and
    every arc pointing at it is redirected to the survivor.  Deleted slots
    are finally set to None; the mutated list is returned.
    """
    changes = True
    deletedStates = []
    # Repeat until a full pass finds no further mergeable pair, since a
    # redirect can make two previously different states equal.
    while changes:
        changes = False
        for i in range(1, len(tempStates)):
            if i in deletedStates:
                continue
            for j in range(0, i):
                if j in deletedStates:
                    continue
                if sameState(tempStates[i], tempStates[j]):
                    deletedStates.append(i)
                    # Redirect all arcs that target i to j.
                    for k in range(0, len(tempStates)):
                        if k in deletedStates:
                            continue
                        for arc in tempStates[k][1]:
                            if arc[1] == i:
                                arc[1] = j
                    changes = True
                    break
    for stateIndex in deletedStates:
        tempStates[stateIndex] = None
    return tempStates
# ______________________________________________________________________
def finalizeTempDfa (tempStates):
    """finalizeTempDfa (tempStates)

    Input domain:
        tempState := [ nfaClosure : Long, [ tempArc ], accept : Boolean ]
        tempArc   := [ label, arrow, nfaClosure ]
        (slots deleted by simplifyTempDfa are None)
    Output domain:
        (states, accepts) where each state is { label : newTargetIndex }
        and accepts[i] is that state's accept flag.
    """
    states = []
    accepts = []
    stateMap = {}
    # First pass: give the surviving (non-None) states compact indices.
    for oldIndex, tempState in enumerate(tempStates):
        if tempState is not None:
            stateMap[oldIndex] = len(states)
            states.append({})
            accepts.append(tempState[2])
    # Second pass: translate every arc target through the index map.
    for oldIndex, newIndex in stateMap.items():
        arcMap = states[newIndex]
        for label, arrow, _nfaClosure in tempStates[oldIndex][1]:
            arcMap[label] = stateMap[arrow]
    return states, accepts
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/django/conf/locale/hr/formats.py | 504 | 2106 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| gpl-3.0 |
rwhitt2049/tiko | setup.py | 1 | 1753 | from codecs import open
from os import path
from setuptools import find_packages, setup
from tiko import __version__
base = path.abspath(path.dirname(__file__))
def install_requires():
    """Return the runtime dependency list read from requirements.txt."""
    req_path = path.join(base, 'requirements.txt')
    with open(req_path, encoding='utf-8') as req_file:
        return req_file.read().splitlines()
def dev_requires():
    """Return the development dependency list read from dev_requirements.txt."""
    req_path = path.join(base, 'dev_requirements.txt')
    with open(req_path, encoding='utf-8') as req_file:
        return req_file.read().splitlines()
def long_description():
    """Return the package's long description read from README.rst."""
    readme_path = path.join(base, 'README.rst')
    with open(readme_path, encoding='utf-8') as readme_file:
        return readme_file.read()
# All distribution metadata is collected in one dict so it can be handed
# to setup() with a single **-expansion.
kwargs = dict(
    name='tiko',
    version=__version__,
    description='Extract features from time series data for machine learning',
    long_description=long_description(),
    author='Ry Whittington',
    author_email='rwhitt2049@gmail.com',
    license='MIT',
    url='https://github.com/rwhitt2049/tiko',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering'
    ],
    keywords='time_series, timeseries, iot, sensor, machine_learning',
    packages=find_packages(exclude=['contrib', 'documentation', 'tests*']),
    install_requires=install_requires(),
    package_data={},
    data_files=[],
    entry_points={},
    test_suite='tests',
    tests_require=dev_requires()
)
setup(**kwargs)
| mit |
pcreech/pulp_ostree | plugins/test/unit/plugins/distributors/test_configuration.py | 2 | 2325 | import os
import shutil
import tempfile
import unittest
import mock
from pulp.plugins.config import PluginCallConfiguration
from pulp_ostree.common import constants
from pulp_ostree.plugins.distributors import configuration
class TestConfigurationGetters(unittest.TestCase):
    """Unit tests for the path-building helpers in
    pulp_ostree.plugins.distributors.configuration."""
    def setUp(self):
        # Work inside a throw-away directory so the tests never touch a
        # real publish location.
        self.working_directory = tempfile.mkdtemp()
        self.publish_dir = os.path.join(self.working_directory, 'publish')
        self.repo_working = os.path.join(self.working_directory, 'work')
        self.repo = mock.Mock(id='foo', working_dir=self.repo_working)
        self.config = PluginCallConfiguration({constants.DISTRIBUTOR_CONFIG_KEY_PUBLISH_DIRECTORY:
                                               self.publish_dir}, {})
    def tearDown(self):
        shutil.rmtree(self.working_directory)
    def test_get_root_publish_directory(self):
        directory = configuration.get_root_publish_directory(self.config)
        self.assertEquals(directory, self.publish_dir)
    def test_get_master_publish_dir(self):
        directory = configuration.get_master_publish_dir(self.repo, self.config)
        self.assertEquals(directory, os.path.join(self.publish_dir, 'master', self.repo.repo_id))
    def test_get_web_publish_dir(self):
        directory = configuration.get_web_publish_dir(self.repo, self.config)
        self.assertEquals(directory, os.path.join(self.publish_dir, 'web', self.repo.repo_id))
    def test_get_repo_relative_path(self):
        # Without an explicit relative path the repo id is used.
        directory = configuration.get_repo_relative_path(self.repo, {})
        self.assertEquals(directory, self.repo.repo_id)
    def test_get_repo_relative_path_when_passed(self):
        # A configured relative path is used with its leading '/' stripped.
        relative_path = '/7/x86/standard'
        config = {
            constants.DISTRIBUTOR_CONFIG_KEY_RELATIVE_PATH: relative_path
        }
        directory = configuration.get_repo_relative_path(self.repo, config)
        self.assertEquals(directory, relative_path[1:])
class TestValidateConfig(unittest.TestCase):
    """Unit test for configuration.validate_config."""
    def test_server_url_fully_qualified(self):
        m_repo = mock.MagicMock()
        conduit = mock.MagicMock()
        # No other distributor claims the relative URL, so an empty
        # config must validate cleanly.
        conduit.get_repo_distributors_by_relative_url.return_value = []
        config = PluginCallConfiguration({}, {})
        self.assertEquals(
            (True, None), configuration.validate_config(m_repo, config, conduit))
| gpl-2.0 |
MihaiMoldovanu/ansible | test/units/modules/network/nxos/test_nxos_vrf_af.py | 25 | 2615 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vrf_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVrfafModule(TestNxosModule):
    """Unit tests for the nxos_vrf_af Ansible module."""
    module = nxos_vrf_af
    def setUp(self):
        # Patch out device I/O so no real connection is attempted.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vrf_af.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vrf_af.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # No fixture output is required for these tests.
        self.load_config.return_value = None
    def test_nxos_vrf_af_present(self):
        set_module_args(dict(vrf='ntc', afi='ipv4', safi='unicast', state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['commands']), sorted(['vrf context ntc',
                                                             'address-family ipv4 unicast']))
    def test_nxos_vrf_af_absent(self):
        # Removing a non-existent AF must be a no-op.
        set_module_args(dict(vrf='ntc', afi='ipv4', safi='unicast', state='absent'))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])
    def test_nxos_vrf_af_route_target(self):
        set_module_args(dict(vrf='ntc', afi='ipv4', safi='unicast', route_target_both_auto_evpn=True))
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result['commands']), sorted(['vrf context ntc',
                                                             'address-family ipv4 unicast',
                                                             'route-target both auto evpn']))
| gpl-3.0 |
maxkoryukov/headphones | lib/unidecode/x063.py | 252 | 4656 | data = (
'Bo ', # 0x00
'Chi ', # 0x01
'Gua ', # 0x02
'Zhi ', # 0x03
'Kuo ', # 0x04
'Duo ', # 0x05
'Duo ', # 0x06
'Zhi ', # 0x07
'Qie ', # 0x08
'An ', # 0x09
'Nong ', # 0x0a
'Zhen ', # 0x0b
'Ge ', # 0x0c
'Jiao ', # 0x0d
'Ku ', # 0x0e
'Dong ', # 0x0f
'Ru ', # 0x10
'Tiao ', # 0x11
'Lie ', # 0x12
'Zha ', # 0x13
'Lu ', # 0x14
'Die ', # 0x15
'Wa ', # 0x16
'Jue ', # 0x17
'Mushiru ', # 0x18
'Ju ', # 0x19
'Zhi ', # 0x1a
'Luan ', # 0x1b
'Ya ', # 0x1c
'Zhua ', # 0x1d
'Ta ', # 0x1e
'Xie ', # 0x1f
'Nao ', # 0x20
'Dang ', # 0x21
'Jiao ', # 0x22
'Zheng ', # 0x23
'Ji ', # 0x24
'Hui ', # 0x25
'Xun ', # 0x26
'Ku ', # 0x27
'Ai ', # 0x28
'Tuo ', # 0x29
'Nuo ', # 0x2a
'Cuo ', # 0x2b
'Bo ', # 0x2c
'Geng ', # 0x2d
'Ti ', # 0x2e
'Zhen ', # 0x2f
'Cheng ', # 0x30
'Suo ', # 0x31
'Suo ', # 0x32
'Keng ', # 0x33
'Mei ', # 0x34
'Long ', # 0x35
'Ju ', # 0x36
'Peng ', # 0x37
'Jian ', # 0x38
'Yi ', # 0x39
'Ting ', # 0x3a
'Shan ', # 0x3b
'Nuo ', # 0x3c
'Wan ', # 0x3d
'Xie ', # 0x3e
'Cha ', # 0x3f
'Feng ', # 0x40
'Jiao ', # 0x41
'Wu ', # 0x42
'Jun ', # 0x43
'Jiu ', # 0x44
'Tong ', # 0x45
'Kun ', # 0x46
'Huo ', # 0x47
'Tu ', # 0x48
'Zhuo ', # 0x49
'Pou ', # 0x4a
'Le ', # 0x4b
'Ba ', # 0x4c
'Han ', # 0x4d
'Shao ', # 0x4e
'Nie ', # 0x4f
'Juan ', # 0x50
'Ze ', # 0x51
'Song ', # 0x52
'Ye ', # 0x53
'Jue ', # 0x54
'Bu ', # 0x55
'Huan ', # 0x56
'Bu ', # 0x57
'Zun ', # 0x58
'Yi ', # 0x59
'Zhai ', # 0x5a
'Lu ', # 0x5b
'Sou ', # 0x5c
'Tuo ', # 0x5d
'Lao ', # 0x5e
'Sun ', # 0x5f
'Bang ', # 0x60
'Jian ', # 0x61
'Huan ', # 0x62
'Dao ', # 0x63
'[?] ', # 0x64
'Wan ', # 0x65
'Qin ', # 0x66
'Peng ', # 0x67
'She ', # 0x68
'Lie ', # 0x69
'Min ', # 0x6a
'Men ', # 0x6b
'Fu ', # 0x6c
'Bai ', # 0x6d
'Ju ', # 0x6e
'Dao ', # 0x6f
'Wo ', # 0x70
'Ai ', # 0x71
'Juan ', # 0x72
'Yue ', # 0x73
'Zong ', # 0x74
'Chen ', # 0x75
'Chui ', # 0x76
'Jie ', # 0x77
'Tu ', # 0x78
'Ben ', # 0x79
'Na ', # 0x7a
'Nian ', # 0x7b
'Nuo ', # 0x7c
'Zu ', # 0x7d
'Wo ', # 0x7e
'Xi ', # 0x7f
'Xian ', # 0x80
'Cheng ', # 0x81
'Dian ', # 0x82
'Sao ', # 0x83
'Lun ', # 0x84
'Qing ', # 0x85
'Gang ', # 0x86
'Duo ', # 0x87
'Shou ', # 0x88
'Diao ', # 0x89
'Pou ', # 0x8a
'Di ', # 0x8b
'Zhang ', # 0x8c
'Gun ', # 0x8d
'Ji ', # 0x8e
'Tao ', # 0x8f
'Qia ', # 0x90
'Qi ', # 0x91
'Pai ', # 0x92
'Shu ', # 0x93
'Qian ', # 0x94
'Ling ', # 0x95
'Yi ', # 0x96
'Ya ', # 0x97
'Jue ', # 0x98
'Zheng ', # 0x99
'Liang ', # 0x9a
'Gua ', # 0x9b
'Yi ', # 0x9c
'Huo ', # 0x9d
'Shan ', # 0x9e
'Zheng ', # 0x9f
'Lue ', # 0xa0
'Cai ', # 0xa1
'Tan ', # 0xa2
'Che ', # 0xa3
'Bing ', # 0xa4
'Jie ', # 0xa5
'Ti ', # 0xa6
'Kong ', # 0xa7
'Tui ', # 0xa8
'Yan ', # 0xa9
'Cuo ', # 0xaa
'Zou ', # 0xab
'Ju ', # 0xac
'Tian ', # 0xad
'Qian ', # 0xae
'Ken ', # 0xaf
'Bai ', # 0xb0
'Shou ', # 0xb1
'Jie ', # 0xb2
'Lu ', # 0xb3
'Guo ', # 0xb4
'Haba ', # 0xb5
'[?] ', # 0xb6
'Zhi ', # 0xb7
'Dan ', # 0xb8
'Mang ', # 0xb9
'Xian ', # 0xba
'Sao ', # 0xbb
'Guan ', # 0xbc
'Peng ', # 0xbd
'Yuan ', # 0xbe
'Nuo ', # 0xbf
'Jian ', # 0xc0
'Zhen ', # 0xc1
'Jiu ', # 0xc2
'Jian ', # 0xc3
'Yu ', # 0xc4
'Yan ', # 0xc5
'Kui ', # 0xc6
'Nan ', # 0xc7
'Hong ', # 0xc8
'Rou ', # 0xc9
'Pi ', # 0xca
'Wei ', # 0xcb
'Sai ', # 0xcc
'Zou ', # 0xcd
'Xuan ', # 0xce
'Miao ', # 0xcf
'Ti ', # 0xd0
'Nie ', # 0xd1
'Cha ', # 0xd2
'Shi ', # 0xd3
'Zong ', # 0xd4
'Zhen ', # 0xd5
'Yi ', # 0xd6
'Shun ', # 0xd7
'Heng ', # 0xd8
'Bian ', # 0xd9
'Yang ', # 0xda
'Huan ', # 0xdb
'Yan ', # 0xdc
'Zuan ', # 0xdd
'An ', # 0xde
'Xu ', # 0xdf
'Ya ', # 0xe0
'Wo ', # 0xe1
'Ke ', # 0xe2
'Chuai ', # 0xe3
'Ji ', # 0xe4
'Ti ', # 0xe5
'La ', # 0xe6
'La ', # 0xe7
'Cheng ', # 0xe8
'Kai ', # 0xe9
'Jiu ', # 0xea
'Jiu ', # 0xeb
'Tu ', # 0xec
'Jie ', # 0xed
'Hui ', # 0xee
'Geng ', # 0xef
'Chong ', # 0xf0
'Shuo ', # 0xf1
'She ', # 0xf2
'Xie ', # 0xf3
'Yuan ', # 0xf4
'Qian ', # 0xf5
'Ye ', # 0xf6
'Cha ', # 0xf7
'Zha ', # 0xf8
'Bei ', # 0xf9
'Yao ', # 0xfa
'[?] ', # 0xfb
'[?] ', # 0xfc
'Lan ', # 0xfd
'Wen ', # 0xfe
'Qin ', # 0xff
)
| gpl-3.0 |
Zulan/PBStats | PBs/Python/v7/PbWizard.py | 2 | 85196 | # Sid Meier's Civilization 4
# Copyright Firaxis Games 2005
#
# Sample PitBoss window/app framework
# Mustafa Thamer 2-15-05
#
from CvPythonExtensions import *
import sys
import wx
import wx.wizard
import wx.lib.scrolledpanel
import time
import string
import os.path
import Webserver
bPublic = True
bSaved = False
bScenario = False
bPatchConfirmed = False
bPatchOK = False
szPatchName = None
msgBox = None
PB = CyPitboss()
gc = CyGlobalContext()
localText = CyTranslator()
curPage = None
def loadSavegame(filename, folderIndex=0, adminPwd=""):
    """Locate *filename* among the known save folders and load it.

    If *filename* already contains the full path, use folderIndex = -1.
    Otherwise the folder at *folderIndex* (when valid) is searched first,
    followed by every other candidate folder.

    Returns (iResult, filepath): iResult is -1 when the file was not
    found, -2 when no admin password matched, and otherwise PB.load's
    return value (0 on success).
    """
    filepath = None
    if folderIndex == -1:
        # Caller supplied a complete path; just verify it exists.
        if os.path.isfile(filename):
            filepath = filename
    else:
        folderpaths = Webserver.getPossibleSaveFolders()
        try:
            # Prioritize the requested folder by also putting it in
            # front; the duplicate entry further down is harmless.
            folderpaths.insert(0, folderpaths[folderIndex])
        except IndexError:
            pass
        for fp in folderpaths:
            # fp[0] is the folder path component of the entry.
            tmpFilePath = os.path.join(fp[0], filename)
            if os.path.isfile(tmpFilePath):
                filepath = tmpFilePath
                break
    if filepath is None:
        iResult = -1
    else:
        # Try the given password plus all known ones against the save.
        pbPasswords.append(adminPwd)
        matchingPwd = Webserver.searchMatchingPassword(filepath, pbPasswords)
        if matchingPwd is None:
            iResult = -2
        else:
            iResult = PB.load(filepath, str(matchingPwd)) # should be 0
            # Store matching password hash for later usage.
            Webserver.PbTmpSettings["adminpw"] = matchingPwd
    return (iResult, filepath)
def start_shell(shell_settings, mode=""):
    """Start the Civ4Shell backend server when enabled in *shell_settings*.

    Returns the running Civ4ShellBackend.Server instance, or None when
    the shell is disabled (in which case the global CIV4_SHELL flag is
    also cleared).
    """
    if shell_settings.get("enable", 0):
        # Make the bundled shell backend importable before importing it.
        pythonDir = os.path.join(gc.getAltrootDir(), '..', 'Python', 'v7')
        sys.path.append(pythonDir)
        import Civ4ShellBackend
        shell_ip = str(shell_settings.get("ip", "127.0.0.1"))
        shell_port = int(shell_settings.get("port", 3333))
        shell = Civ4ShellBackend.Server(shell_ip, shell_port)
        shell.set_mode(mode)
        return shell
    else:
        global CIV4_SHELL
        CIV4_SHELL = False
        return None
PbSettings = Webserver.getPbSettings()
pbPasswords = Webserver.getPbPasswords()
PBMOD_NOGUI = PbSettings.get("noGui", False)
PBMOD_AUTOSTART = False
if(PbSettings.get("save", {}).get("oneOffAutostart")):
PBMOD_AUTOSTART = True
del PbSettings["save"]["oneOffAutostart"]
# The oneOffAutostart-Key was removed. Save this status
Webserver.savePbSettings()
if PbSettings.get("autostart"):
PBMOD_AUTOSTART = True
# Check deprecated node position of 'autostart'
if PbSettings.get("save", {}).get("autostart"):
PBMOD_AUTOSTART = True
PbSettings["autostart"] = True
del PbSettings["save"]["autostart"]
Webserver.savePbSettings()
# Check deprecated node name
if PbSettings.get("save", {}).get("path"):
PbSettings["save"]["writefolder"] = PbSettings["save"]["path"]
del PbSettings["save"]["path"]
Webserver.savePbSettings()
"""
Attention: The different (gui, nogui, shell) leads
to totally different defined classes...
"""
CIV4_SHELL = PbSettings.get("shell", {}).get("enable", 0)
if PBMOD_NOGUI and not CIV4_SHELL:
# Above flags require autostart of server.
PBMOD_AUTOSTART = True
CyPitboss().consoleOut(
"PB Mod Start Options:\n\tAutostart: %i\n\tNo Gui:d %i\n\tShell: %i" % (
PBMOD_AUTOSTART, PBMOD_NOGUI, CIV4_SHELL)
)
if CIV4_SHELL and not PBMOD_AUTOSTART:
CyPitboss().consoleOut("Warning, application waits for commands on shell (see Pyconsole).")
CyPitboss().consoleOut("Disable shell or enable autostart if this is not desired.")
if CIV4_SHELL and not PBMOD_AUTOSTART:
""" New approach with interactive shell. """
#
# main app class
#
class StartupIFace:
def __init__(self, arg):
self.shell_loop = True
self.civ4Shell = {
"glob": globals(),
"loc": locals(),
"shell": start_shell(PbSettings.get("shell", {}), "pb_wizard")
}
if self.civ4Shell["shell"]:
self.civ4Shell["shell"].set_startup_iface(self)
self.civ4Shell["shell"].init()
self.OnInit()
def OnInit(self):
while self.shell_loop:
time.sleep(0.1)
try:
self.shell_loop = self.civ4Shell["shell"].update(
self.civ4Shell["glob"],
self.civ4Shell["loc"])
except Exception, e:
CyPitboss().consoleOut("self.civ4Shell error:" + str(e))
# Stop server (how to omit?)
# self.civ4Shell["shell"].s.close()
# time.sleep(1)
# self.civ4Shell["shell"].s.shutdown(2)
# time.sleep(1)
# self.civ4Shell["shell"].run = False
# time.sleep(1)
self.civ4Shell = {} # Force invoke of deconstructors?!
# time.sleep(5)
if True:
# Use predefined values to start up server
# without wizard pages
global bPublic
global bScenario
adminPwd = str(PbSettings.get("save", {}).get("adminpw", ""))
folderIndex = int(PbSettings.get("save", {}).get("folderIndex", 0))
filename = str(PbSettings["save"]["filename"])
(iResult, filepath) = loadSavegame(filename, folderIndex, adminPwd)
if iResult == 0:
PB.setLoadFileName(filepath)
if (not PB.host(bPublic, bScenario)):
PB.reset()
else:
PB.getDone()
PB.launch()
else:
# Loading error of Save
# Missing error message for user here...
PB.quit()
return True
def startWizard(self):
return True
def refreshRow(self, iRow):
return
elif PBMOD_NOGUI or PBMOD_AUTOSTART:
# Reduced GUI classes
#
# main app class
#
class StartupIFace:
def __init__(self, arg):
self.OnInit()
def OnInit(self):
# Currently, PBMOD_AUTOSTART is always true for PBMOD_NOGUI variant
if PBMOD_AUTOSTART:
# Use predefined values to start up server
# without wizard pages
global bPublic
global bScenario
adminPwd = str(PbSettings.get("save", {}).get("adminpw", ""))
folderIndex = int(PbSettings.get("save", {}).get("folderIndex", 0))
filename = str(PbSettings["save"]["filename"])
(iResult, filepath) = loadSavegame(filename, folderIndex, adminPwd)
if iResult == 0:
PB.setLoadFileName(filepath)
if (not PB.host(bPublic, bScenario)):
PB.reset()
else:
PB.getDone()
PB.launch()
else:
# Loading error of Save
# Missing error message for user here...
PB.quit()
return True
def startWizard(self):
return True
def refreshRow(self, iRow):
return
else:
# Normal GUI classes
#
# Mod Select Page (first page of wizard)
#
class ModSelectPage(wx.wizard.PyWizardPage):
def __init__(self, parent):
wx.wizard.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.myParent = parent
pageSizer = wx.BoxSizer(wx.VERTICAL)
modPanel = wx.lib.scrolledpanel.ScrolledPanel(self, -1, size=(300, 600), style=wx.SUNKEN_BORDER)
sizer = wx.BoxSizer(wx.VERTICAL)
header = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_CHOOSE_MOD", ()))
pageSizer.Add(header, 0, wx.ALL, 5)
# Place the radio buttons
self.currentMod = 0
self.rbs = []
# First choice is no mod
self.rbs.append(wx.RadioButton(
modPanel, -1, localText.getText("TXT_KEY_MAIN_MENU_NONE", ()), wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP
))
sizer.Add(self.rbs[0], 0, wx.ALL, 3)
if (PB.getModName() == ""):
self.rbs[0].SetValue(True)
index = 0
for index in range(PB.getNumMods()):
self.rbs.append(wx.RadioButton(
modPanel, -1, PB.getModAt(index), wx.DefaultPosition, wx.DefaultSize
))
sizer.Add(self.rbs[index+1], 0, wx.ALL, 3)
if (PB.isCurrentMod(index)):
self.currentMod = index+1
self.rbs[index+1].SetValue(True)
modPanel.SetSizer(sizer)
modPanel.SetAutoLayout(1)
modPanel.SetupScrolling()
pageSizer.Add(modPanel, 0, wx.ALL, 5)
self.SetSizer(pageSizer)
self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
def enableButtons(self):
self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(False)
def OnPageChanged(self, event):
global curPage
global bPatchConfirmed
global bPatchOK
bPatchConfirmed = False
bPatchOK = False
# Determine what buttons should be enabled
self.enableButtons()
# We are the current page
curPage = self
def OnPageChanging(self, event):
# Check direction
if event.GetDirection():
# We are trying to move forward - have we selected another mod?
# Determine our selection
iSelection = 0
while (not self.rbs[iSelection].GetValue() and iSelection < PB.getNumMods()):
iSelection = iSelection+1
# Do we need to load a mod
if (iSelection != self.currentMod):
# Yep.
PB.loadMod(iSelection-1)
PB.quit()
    def SetNext(self, next):
        # Register the wizard page that follows this one (see GetNext).
        self.next = next
    def SetPrev(self, prev):
        # Register the wizard page that precedes this one (see GetPrev).
        self.prev = prev
    def GetNext(self):
        "Select which next page to show based on network selected"
        next = self.next
        # Determine our selection: index of the checked radio button
        iSelection = 0
        while (not self.rbs[iSelection].GetValue() and iSelection < PB.getNumMods()):
            iSelection = iSelection+1
        # Do we need to load a mod
        if (iSelection != self.currentMod):
            # A mod change restarts the app (see OnPageChanging), so no next page
            next = None
        return next
    def GetPrev(self):
        # Return the page registered via SetPrev.
        return self.prev
#
# SMTP Login Page
#
class SMTPLoginPage(wx.wizard.WizardPageSimple):
    """Wizard page that collects SMTP host/login/password and a notification e-mail.

    Values are pre-filled from the app's saved settings and pushed back via
    PB.setSMTPValues() when the user moves forward.
    """
    def __init__(self, parent):
        wx.wizard.WizardPageSimple.__init__(self, parent)
        self.myParent = parent
        # NOTE(review): header is created but never added to a sizer - confirm
        # whether it was meant to be displayed.
        header = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_SMTP_HEADER", ()))
        hostLbl = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_SMTP_HOST", ()))
        self.host = wx.TextCtrl(self, -1, PB.getSMTPHost(), size=(125, -1))
        self.host.SetHelpText(localText.getText("TXT_KEY_PITBOSS_SMTP_HOST_HELP", ()))
        self.host.SetInsertionPoint(0)
        usernameLbl = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_SMTP_LOGIN", ()))
        self.username = wx.TextCtrl(self, -1, PB.getSMTPLogin(), size=(125, -1))
        self.username.SetHelpText(localText.getText("TXT_KEY_PITBOSS_SMTP_LOGIN_HELP", ()))
        passwordLbl = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_SMTP_PASSWORD", ()))
        # The password field is never pre-filled
        self.password = wx.TextCtrl(self, -1, "", size=(125, -1), style=wx.TE_PASSWORD)
        self.password.SetHelpText(localText.getText("TXT_KEY_PITBOSS_SMTP_PASSWORD_HELP", ()))
        emailLbl = wx.StaticText(self, -1, localText.getText("TXT_KEY_POPUP_DETAILS_EMAIL", ()))
        self.email = wx.TextCtrl(self, -1, PB.getEmail(), size=(125, -1))
        self.email.SetHelpText(localText.getText("TXT_KEY_POPUP_DETAILS_EMAIL", ()))
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
        # Two-column grid: label column, input column
        sizer = wx.FlexGridSizer(cols=2, hgap=4, vgap=4)
        sizer.AddMany([hostLbl, self.host,
                       usernameLbl, self.username,
                       passwordLbl, self.password,
                       emailLbl, self.email,
                       ])
        border = wx.BoxSizer(wx.VERTICAL)
        border.Add(sizer, 0, wx.ALL, 25)
        self.SetSizer(border)
        self.SetAutoLayout(True)
    def enableButtons(self):
        # Both navigation directions are always available on this page.
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
        self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
    def OnPageChanged(self, event):
        """Record this page as current and refresh the navigation buttons."""
        global curPage
        # Determine what buttons should be enabled
        self.enableButtons()
        # We are the current page
        curPage = self
    def OnPageChanging(self, event):
        """On forward navigation, hand the entered SMTP settings to the app."""
        # Check direction
        if event.GetDirection():
            # We are trying to move forward - set the SMTP values
            PB.setSMTPValues(self.host.GetValue(), self.username.GetValue(), self.password.GetValue(), self.email.GetValue())
#
# Network Selection Page
#
class NetSelectPage(wx.wizard.PyWizardPage):
    """Wizard page for choosing the network type (Direct IP, LAN, or Internet).

    Sets the module-level bPublic flag and decides whether the following
    page in the chain is skipped.
    """
    def __init__(self, parent):
        wx.wizard.PyWizardPage.__init__(self, parent)
        self.next = self.prev = None
        self.myParent = parent
        # Place the radio buttons
        selections = [localText.getText("TXT_KEY_PITBOSS_DIRECTIP", ()), localText.getText("TXT_KEY_PITBOSS_LAN", ()), localText.getText("TXT_KEY_PITBOSS_INTERNET", ())]
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.rb = wx.RadioBox(
            self, -1, localText.getText("TXT_KEY_PITBOSS_SELECT_NETWORK", ()), wx.DefaultPosition, wx.DefaultSize,
            selections, 1, wx.RA_SPECIFY_COLS
        )
        self.rb.SetToolTip(wx.ToolTip(localText.getText("TXT_KEY_PITBOSS_SELECT_NETWORK_HELP", ())))
        sizer.Add(self.rb, 0, wx.ALL, 5)
        self.SetSizer(sizer)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
    def enableButtons(self):
        # Both navigation directions are always available on this page.
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
        self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
    def OnPageChanged(self, event):
        """Record this page as current and refresh the navigation buttons."""
        global curPage
        # Determine what buttons should be enabled
        self.enableButtons()
        # We are the current page
        curPage = self
    def SetNext(self, next):
        # Register the page that follows this one in the wizard chain.
        self.next = next
    def SetPrev(self, prev):
        # Register the page that precedes this one in the wizard chain.
        self.prev = prev
    def GetNext(self):
        "Select which next page to show based on network selected"
        global bPublic
        next = self.next
        if (self.rb.GetSelection() == 0):
            # Direct IP: public game; skip the immediately following page
            # (presumably the login page - confirm the chain built by the wizard)
            bPublic = True
            next = next.GetNext()
        elif (self.rb.GetSelection() == 1):
            # LAN: private game; skip the immediately following page as well
            bPublic = False
            next = next.GetNext()
        else:
            # Internet: public game; fall through to the next page unchanged
            bPublic = True
        return next
    def GetPrev(self):
        return self.prev
#
# Login page (optional 2nd page)
#
class LoginPage(wx.wizard.WizardPageSimple):
    """Wizard page that collects login credentials (optional second page).

    Also drives the patch-check state machine via the module-level globals
    bPatchConfirmed / bPatchOK / szPatchName. The actual patch check is
    disabled in code below as a workaround for the GameSpy service shutdown.
    """
    def __init__(self, parent):
        wx.wizard.WizardPageSimple.__init__(self, parent)
        self.myParent = parent
        # NOTE(review): header is created but never added to a sizer - confirm
        # whether it was meant to be displayed.
        header = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_LOGIN", ()))
        usernameLbl = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_USERNAME", ()))
        self.username = wx.TextCtrl(self, -1, "", size=(125, -1))
        self.username.SetHelpText(localText.getText("TXT_KEY_PITBOSS_USERNAME_HELP", ()))
        self.username.SetInsertionPoint(0)
        self.Bind(wx.EVT_TEXT, self.OnTextEntered, self.username)
        passwordLbl = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_PASSWORD", ()))
        self.password = wx.TextCtrl(self, -1, "", size=(125, -1), style=wx.TE_PASSWORD)
        self.password.SetHelpText(localText.getText("TXT_KEY_PITBOSS_PASSWORD_HELP", ()))
        self.Bind(wx.EVT_TEXT, self.OnTextEntered, self.password)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
        sizer = wx.FlexGridSizer(cols=2, hgap=4, vgap=4)
        sizer.AddMany([usernameLbl, self.username,
                       passwordLbl, self.password,
                       ])
        border = wx.BoxSizer(wx.VERTICAL)
        border.Add(sizer, 0, wx.ALL, 25)
        self.SetSizer(border)
        self.SetAutoLayout(True)
    def enableButtons(self):
        """Enable/disable the wizard buttons from patch state and field contents."""
        global bPatchConfirmed
        global bPatchOK
        if (not bPatchConfirmed):
            # Not confirmed, disable buttons
            self.myParent.FindWindowById(wx.ID_FORWARD).Disable()
            self.myParent.FindWindowById(wx.ID_BACKWARD).Disable()
        # Check to see if there is text in both boxes
        elif ((self.username.GetValue() == "") or (self.password.GetValue() == "") or (not bPatchOK)):
            # There isn't, disable the forward button
            self.myParent.FindWindowById(wx.ID_FORWARD).Disable()
            self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
        else:
            # Text entered, enable the forward button
            self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
            self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
    def patchAvailable(self, patchName, patchUrl):
        """Callback from the app: a required patch exists; prompt and download it."""
        global bPatchConfirmed
        global szPatchName
        # Put up a dialog
        dlg = wx.MessageDialog(
            self, localText.getText("TXT_KEY_PITBOSS_PATCH_REQUIRED_DESC", ()),
            localText.getText("TXT_KEY_PITBOSS_PATCH_REQUIRED_TITLE", ()), wx.OK | wx.CANCEL | wx.ICON_EXCLAMATION)
        # Show the modal dialog and get the response
        if dlg.ShowModal() == wx.ID_OK:
            # They want to download the patch - tell the app
            # Workaround for an infinite hang caused by the GameSpy shutdown
            if (not PB.downloadPatch(patchName, patchUrl)):
            # if (False):
                # Patching failed - tell the user
                msg = wx.MessageBox(localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_DESC", ()), localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_TITLE", ()), wx.ICON_ERROR)
            bPatchConfirmed = True
            szPatchName = patchName
            self.enableButtons()
        else:
            bPatchConfirmed = True
            self.enableButtons()
    def patchComplete(self):
        """Callback from the app: the patch download finished; offer to install it."""
        global bPatchConfirmed
        global bPatchOK
        global szPatchName
        # Put up a dialog
        dlg = wx.MessageDialog(
            self, localText.getText("TXT_KEY_PITBOSS_PATCH_COMPLETE_DESC", ()),
            localText.getText("TXT_KEY_PITBOSS_PATCH_COMPLETE_TITLE", ()), wx.OK | wx.ICON_EXCLAMATION)
        # Show the dialog and get the response
        if dlg.ShowModal() == wx.ID_OK:
            # They want to restart - tell the app
            # Workaround for an infinite hang caused by the GameSpy shutdown
            PB.installPatch(szPatchName)
            # pass
        else:
            # Not sure if this can actually happen, but handle it anyway
            bPatchConfirmed = True
            bPatchOK = False
    def OnTextEntered(self, event):
        # Determine what buttons should be enabled
        self.enableButtons()
    def OnPageChanging(self, event):
        """Validate the credentials with the app before allowing forward navigation."""
        # Check direction
        if event.GetDirection():
            # We are trying to move forward - check password
            if (not PB.login(self.username.GetValue(), self.password.GetValue())):
                # Login failed - let the user know
                msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_LOGIN_FAILED", ())),
                    (localText.getText("TXT_KEY_PITBOSS_LOGIN_ERROR", ())), wx.ICON_ERROR)
                # Veto the event to prevent moving forward
                event.Veto()
    def OnPageChanged(self, event):
        """Run the (disabled) patch check and record this page as current."""
        global bPatchConfirmed
        global curPage
        # Check for a patch here
        if (not bPatchConfirmed):
            # Workaround: the real patch check is commented out because the
            # GameSpy shutdown made it hang indefinitely
            # if (not PB.checkPatch()):
            if (False):
                # Error in checking for a patch
                # NOTE(review): uses the download-error title for a check error;
                # LoadSelectPage uses TXT_KEY_PITBOSS_PATCH_CHECK_ERROR_TITLE - confirm
                msg = wx.MessageBox(localText.getText("TXT_KEY_PITBOSS_PATCH_CHECK_ERROR_DESC", ()), localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_TITLE", ()), wx.ICON_ERROR)
            bPatchConfirmed = True
        # Determine what buttons should be enabled
        self.enableButtons()
        # We are the current page
        curPage = self
#
# Load Select Page
#
class LoadSelectPage(wx.wizard.PyWizardPage):
    """Wizard page for choosing how to start: new game, scenario, or saved game.

    For a saved game it browses for the file, loads its setup data and hosts
    immediately; for a new game it prompts for the game name (and password in
    public games) and hosts; for a scenario it defers hosting to
    ScenarioSelectPage (see GetNext).
    """
    def __init__(self, parent):
        wx.wizard.PyWizardPage.__init__(self, parent)
        self.next = self.prev = None
        self.myParent = parent
        # Place the radio buttons
        selections = [localText.getText("TXT_KEY_PITBOSS_NEWGAME", ()), localText.getText("TXT_KEY_PITBOSS_SCENARIO", ()), localText.getText("TXT_KEY_PITBOSS_LOADGAME", ())]
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.rb = wx.RadioBox(
            self, -1, (localText.getText("TXT_KEY_PITBOSS_SELECT_INIT", ())), wx.DefaultPosition, wx.DefaultSize,
            selections, 1, wx.RA_SPECIFY_COLS
        )
        self.rb.SetToolTip(wx.ToolTip((localText.getText("TXT_KEY_PITBOSS_SELECT_INIT_HELP", ()))))
        sizer.Add(self.rb, 0, wx.ALL, 5)
        self.SetSizer(sizer)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
    def enableButtons(self):
        # If the patch state is ok, enable appropriate buttons
        global bPatchConfirmed
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(bPatchConfirmed)
        self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(bPatchConfirmed)
        # Workaround for the GameSpy shutdown: patch checks never run, so the
        # buttons are unconditionally enabled, overriding the lines above
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
        self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
    def patchAvailable(self, patchName, patchUrl):
        """Callback from the app: a patch exists; ask the user and download it."""
        global bPatchConfirmed
        # Put up a dialog
        dlg = wx.MessageDialog(
            self, localText.getText("TXT_KEY_PITBOSS_PATCH_AVAILABLE_DESC", ()),
            localText.getText("TXT_KEY_PITBOSS_PATCH_AVAILABLE_TITLE", ()), wx.YES_NO | wx.ICON_QUESTION)
        # Show the modal dialog and get the response
        if dlg.ShowModal() == wx.ID_YES:
            # They want to download the patch - tell the app
            if (not PB.downloadPatch(patchName, patchUrl)):
                # Patching failed - tell the user
                msg = wx.MessageBox(localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_DESC", ()), localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_TITLE", ()), wx.ICON_ERROR)
                bPatchConfirmed = True
                self.enableButtons()
        else:
            # They didn't want to download it, which is ok for LAN games
            bPatchConfirmed = True
            self.enableButtons()
    def patchComplete(self):
        """Callback from the app: patch download finished; offer to install it."""
        global bPatchConfirmed
        global bPatchOK
        global szPatchName
        # Put up a dialog
        dlg = wx.MessageDialog(
            self, localText.getText("TXT_KEY_PITBOSS_PATCH_COMPLETE_DESC", ()),
            localText.getText("TXT_KEY_PITBOSS_PATCH_COMPLETE_TITLE", ()), wx.OK | wx.CANCEL | wx.ICON_EXCLAMATION)
        # Show the dialog and get the response
        if dlg.ShowModal() == wx.ID_OK:
            # They want to restart - tell the app
            PB.installPatch(szPatchName)
        else:
            # This is ok for LAN games
            bPatchConfirmed = True
            bPatchOK = False
    def OnPageChanged(self, event):
        """Run the (disabled) patch check and record this page as current."""
        global curPage
        # If we haven't already, check for a patch
        global bPatchConfirmed
        if (not bPatchConfirmed):
            # Workaround: the real patch check is commented out because the
            # GameSpy shutdown made it hang indefinitely
            # if (not PB.checkPatch()):
            if (False):
                # Error in checking for a patch
                msg = wx.MessageBox(localText.getText("TXT_KEY_PITBOSS_PATCH_CHECK_ERROR_DESC", ()), localText.getText("TXT_KEY_PITBOSS_PATCH_CHECK_ERROR_TITLE", ()), wx.ICON_ERROR)
            bPatchConfirmed = True
        # Determine what buttons should be enabled
        self.enableButtons()
        curPage = self
    def SetNext(self, next):
        # Register the page that follows this one in the wizard chain.
        self.next = next
    def SetPrev(self, prev):
        # Register the page that precedes this one in the wizard chain.
        self.prev = prev
    def GetNext(self):
        "Determine which page to display next"
        next = self.next
        if (self.rb.GetSelection() == 0):
            # If it's a new game, skip the scenario selector
            next = next.GetNext()
        if (self.rb.GetSelection() == 2):
            # If it's a loaded game, launch now
            next = None
        return next
    def GetPrev(self):
        return self.prev
    def OnPageChanging(self, event):
        """Handle forward navigation: load a save or set up a new/scenario game.

        Forward navigation is vetoed on any cancellation or failure; backward
        navigation resets the network layer and logs out.
        """
        global bSaved
        global bScenario
        # Check direction
        if event.GetDirection():
            # We are trying to move forward - are we trying to init'ing or loading game?
            if (self.rb.GetSelection() == 2):
                # Loading a game - popup the file browser
                bScenario = False
                # NOTE(review): the defaultDir literal relies on "\S" and "\m" not
                # being escape sequences; a raw string would be safer - confirm
                # before changing.
                dlg = wx.FileDialog(
                    self, message=(localText.getText("TXT_KEY_PITBOSS_CHOOSE_SAVE", ())), defaultDir=".\Saves\multi",
                    defaultFile="", wildcard=localText.getText("TXT_KEY_PITBOSS_SAVE_FILES", ("(*.CivBeyondSwordSave)|*.CivBeyondSwordSave", )), style=wx.OPEN
                )
                # Show the modal dialog and get the response
                if dlg.ShowModal() == wx.ID_OK:
                    # Get the file name
                    path = dlg.GetPath()
                    if (path != ""):
                        # Prompt for admin password
                        dlg = wx.TextEntryDialog(
                            self, localText.getText("TXT_KEY_MAIN_MENU_CIV_ADMINPWD_DESC", ()),
                            localText.getText("TXT_MAIN_MENU_CIV_PASSWORD_TITLEBAR", ()))
                        # Show the modal dialog and get the response
                        if dlg.ShowModal() == wx.ID_OK:
                            # Check the game name
                            adminPwd = dlg.GetValue()
                            # We got a save file - try to load the setup info
                            # iResult = PB.load(path, adminPwd)
                            (iResult, filepath) = loadSavegame(path, -1, adminPwd)
                            if (iResult != 0):
                                # Loading setup info failed. Clean up and exit
                                if (iResult == 1):
                                    msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_ERROR_LOADING", ())), (localText.getText("TXT_KEY_PITBOSS_LOAD_ERROR", ())), wx.ICON_ERROR)
                                elif (iResult == -1):
                                    msg = wx.MessageBox((localText.getText("TXT_MAIN_MENU_CIV_PASSWORD_RETRY_DESC", ())), (localText.getText("TXT_KEY_BAD_PASSWORD_TITLE", ())), wx.ICON_ERROR)
                                PB.reset()
                                event.Veto()
                            else:
                                # Successfully loaded, try hosting
                                PB.setLoadFileName(path)
                                if (not PB.host(bPublic, bScenario)):
                                    msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_ERROR_HOSTING", ())), (localText.getText("TXT_KEY_PITBOSS_HOST_ERROR", ())), wx.ICON_ERROR)
                                    PB.reset()
                                    event.Veto()
                                else:
                                    bSaved = True
                        else:
                            # User cancelled admin password
                            PB.reset()
                            event.Veto()
                    else:
                        # Didn't get a save file - veto the page change
                        event.Veto()
                else:
                    # User hit cancel - veto the page change
                    event.Veto()
                # Destroy the dialog
                dlg.Destroy()
            else:
                bSaved = False
                # Check to make sure this is a valid option
                if (self.rb.GetSelection() == 0):
                    # New game - check maps
                    if (PB.getNumMapScripts() == 0):
                        msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_NO_MAPS_DESC", ())), (localText.getText("TXT_KEY_PITBOSS_NO_MAPS_TITLE", ())), wx.ICON_EXCLAMATION)
                        event.Veto()
                        return
                if (self.rb.GetSelection() == 1):
                    # New game - check scenarios
                    if (PB.getNumScenarios() == 0):
                        msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_NO_SCENARIOS_DESC", ())), (localText.getText("TXT_KEY_PITBOSS_NO_SCENARIOS_TITLE", ())), wx.ICON_EXCLAMATION)
                        event.Veto()
                        return
                # Hosting a new game - pop the gamename dialog
                dlg = wx.TextEntryDialog(
                    self, localText.getText("TXT_KEY_PITBOSS_NAME_GAME_DESC", ()),
                    localText.getText("TXT_KEY_PITBOSS_NAME_GAME_TITLE", ()))
                # Show the modal dialog and get the response
                if dlg.ShowModal() == wx.ID_OK:
                    # Check the game name
                    gamename = dlg.GetValue()
                    if (gamename != ""):
                        # We got a gamename, save it here
                        PB.setGamename(gamename)
                        # Prompt for passwords in public games
                        bOK = (not bPublic)
                        if bPublic:
                            dlg = wx.TextEntryDialog(
                                self, localText.getText("TXT_KEY_PITBOSS_PWD_GAME_DESC", ()),
                                localText.getText("TXT_KEY_PITBOSS_PWD_GAME_TITLE", ()))
                            if (dlg.ShowModal() == wx.ID_OK):
                                bOK = True
                                PB.setGamePassword(dlg.GetValue())
                        if bOK:
                            # If we are starting a new game, host
                            if (self.rb.GetSelection() == 0):
                                bScenario = False
                                if (not PB.host(bPublic, bScenario)):
                                    # Hosting failed for some reason. Clean up and exit
                                    msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_ERROR_HOSTING", ())), (localText.getText("TXT_KEY_PITBOSS_HOST_ERROR", ())), wx.ICON_ERROR)
                                    PB.reset()
                                    event.Veto()
                        else:
                            # User hit cancel
                            event.Veto()
                    else:
                        # Malicious user didn't enter a gamename...
                        event.Veto()
                else:
                    # User hit cancel
                    event.Veto()
                dlg.Destroy()
        else:
            # We are moving backward - reset the network layer
            PB.reset()
            PB.logout()
#
# Scenario Selection page (optional 4th page)
#
class ScenarioSelectPage(wx.wizard.WizardPageSimple):
    """Wizard page listing available scenarios (optional fourth page).

    On forward navigation it loads the selected scenario's setup info and
    hosts the game; on backward navigation it resets the network layer.
    """
    def __init__(self, parent):
        wx.wizard.WizardPageSimple.__init__(self, parent)
        self.myParent = parent
        pageSizer = wx.BoxSizer(wx.VERTICAL)
        scenarioPanel = wx.lib.scrolledpanel.ScrolledPanel(self, -1, size=(300, 600), style=wx.SUNKEN_BORDER)
        sizer = wx.BoxSizer(wx.VERTICAL)
        header = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_CHOOSE_SCENARIO", ()))
        pageSizer.Add(header, 0, wx.ALL, 5)
        # Place the radio buttons
        self.rbs = []
        index = 0
        global PBMOD_AUTOSTART
        # NOTE(review): when PBMOD_AUTOSTART is set, self.rbs stays empty and
        # OnPageChanging's self.rbs[0] access would raise IndexError - confirm
        # this page is never reached in autostart mode.
        if not PBMOD_AUTOSTART:
            for index in range(PB.getNumScenarios()):
                # We need to start a group on the first one
                if (index == 0):
                    self.rbs.append(wx.RadioButton(
                        scenarioPanel, -1, PB.getScenarioAt(index), wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP
                    ))
                else:
                    self.rbs.append(wx.RadioButton(
                        scenarioPanel, -1, PB.getScenarioAt(index), wx.DefaultPosition, wx.DefaultSize
                    ))
                sizer.Add(self.rbs[index], 0, wx.ALL, 3)
        scenarioPanel.SetSizer(sizer)
        scenarioPanel.SetAutoLayout(1)
        scenarioPanel.SetupScrolling()
        pageSizer.Add(scenarioPanel, 0, wx.ALL, 5)
        self.SetSizer(pageSizer)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
    def enableButtons(self):
        # Both navigation directions are always available on this page.
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
        self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
    def OnPageChanged(self, event):
        """Record this page as current and refresh the navigation buttons."""
        global curPage
        # Determine what buttons should be enabled
        self.enableButtons()
        curPage = self
    def OnPageChanging(self, event):
        """On forward navigation, load the chosen scenario and host the game."""
        global bPublic
        global bScenario
        # Check direction
        if event.GetDirection():
            # Determine our selection: index of the checked radio button
            iSelection = 0
            while (not self.rbs[iSelection].GetValue() and iSelection < PB.getNumScenarios()):
                iSelection = iSelection+1
            # We are trying to move forward - Set the selected scenario
            if (PB.loadScenarioInfo(PB.getScenarioAt(iSelection))):
                bScenario = True
                if (not PB.host(bPublic, bScenario)):
                    # Hosting failed for some reason. Clean up and exit
                    msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_ERROR_HOSTING", ())), (localText.getText("TXT_KEY_PITBOSS_HOST_ERROR", ())), wx.ICON_ERROR)
                    PB.reset()
                    event.Veto()
            else:
                # Loading the scenario failed
                msg = wx.MessageBox((localText.getText("TXT_KEY_PITBOSS_SCENARIO_ERROR", ())), (localText.getText("TXT_KEY_PITBOSS_SCENARIO_ERROR_TITLE", ())), wx.ICON_ERROR)
                PB.reset()
                event.Veto()
        else:
            # We are moving backward
            PB.reset()
#
# Staging room (last page before launch)
#
class StagingPage(wx.wizard.WizardPageSimple):
    def __init__(self, parent):
        """Build the staging-room UI: game settings dropdowns, option checkbox
        columns, multiplayer options, victory conditions and one row of
        controls per possible player slot."""
        wx.wizard.WizardPageSimple.__init__(self, parent)
        self.myParent = parent
        # Get the game info struct
        gameData = PB.getGameSetupData()
        # Create our array of controls (indexed in step with the app's lists)
        self.optionArray = []
        self.mpOptionArray = []
        self.victoriesArray = []
        self.whoArray = []
        self.civArray = []
        self.leaderArray = []
        self.teamArray = []
        self.diffArray = []
        self.statusArray = []
        # Declare storage arrays
        self.customItemSizerArray = []
        self.customMapTextArray = []
        self.customMapOptionArray = []
        # Build the initial selections
        # Map
        mapNameList = []
        rowNum = 0
        for rowNum in range(PB.getNumMapScripts()):
            mapNameList.append((PB.getMapNameAt(rowNum)))
        # World size
        sizeList = []
        rowNum = 0
        for rowNum in range(PB.getNumSizes()):
            sizeList.append((PB.getSizeAt(rowNum)))
        # Climate
        climateList = []
        rowNum = 0
        for rowNum in range(PB.getNumClimates()):
            climateList.append((PB.getClimateAt(rowNum)))
        # Sealevel
        seaLevelList = []
        rowNum = 0
        for rowNum in range(PB.getNumSeaLevels()):
            seaLevelList.append((PB.getSeaLevelAt(rowNum)))
        # Era
        eraList = []
        rowNum = 0
        for rowNum in range(PB.getNumEras()):
            eraList.append((PB.getEraAt(rowNum)))
        # Game speed
        speedList = []
        rowNum = 0
        for rowNum in range(PB.getNumSpeeds()):
            speedList.append((PB.getSpeedAt(rowNum)))
        # Options
        optionList = []
        rowNum = 0
        for rowNum in range(PB.getNumOptions()):
            optionList.append((PB.getOptionDescAt(rowNum)))
        # Create the master page sizer
        self.pageSizer = wx.BoxSizer(wx.VERTICAL)
        # Create the game options area
        masterBorder = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_SETUP", ()))))
        self.optionsSizer = wx.StaticBoxSizer(masterBorder, wx.HORIZONTAL)
        # Create the drop down side
        settingsBorder = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_SETTINGS", ()))))
        self.dropDownSizer = wx.StaticBoxSizer(settingsBorder, wx.VERTICAL)
        # Create label/control pairs for map
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_PITBOSS_MAP", ())))
        self.mapChoice = wx.Choice(self, -1, (-1, -1), choices=mapNameList)
        self.mapChoice.SetStringSelection(gameData.getMapName())
        itemSizer.Add(txt)
        itemSizer.Add(self.mapChoice)
        self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
        self.Bind(wx.EVT_CHOICE, self.OnGameChoice, self.mapChoice)
        # Create label/control pairs for size
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_PITBOSS_SIZE", ())))
        self.sizeChoice = wx.Choice(self, -1, (-1, -1), choices=sizeList)
        self.sizeChoice.SetSelection(gameData.iSize)
        itemSizer.Add(txt)
        itemSizer.Add(self.sizeChoice)
        self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
        self.Bind(wx.EVT_CHOICE, self.OnGameChoice, self.sizeChoice)
        # Create label/control pairs for climate
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_PITBOSS_CLIMATE", ())))
        self.climateChoice = wx.Choice(self, -1, (-1, -1), choices=climateList)
        self.climateChoice.SetSelection(gameData.iClimate)
        itemSizer.Add(txt)
        itemSizer.Add(self.climateChoice)
        self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
        self.Bind(wx.EVT_CHOICE, self.OnGameChoice, self.climateChoice)
        # Create label/control pairs for sealevel
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_PITBOSS_SEALEVEL", ())))
        self.seaLevelChoice = wx.Choice(self, -1, (-1, -1), choices=seaLevelList)
        self.seaLevelChoice.SetSelection(gameData.iSeaLevel)
        itemSizer.Add(txt)
        itemSizer.Add(self.seaLevelChoice)
        self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
        self.Bind(wx.EVT_CHOICE, self.OnGameChoice, self.seaLevelChoice)
        # Create label/control pairs for era
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_PITBOSS_ERA", ())))
        self.eraChoice = wx.Choice(self, -1, (-1, -1), choices=eraList)
        self.eraChoice.SetSelection(gameData.iEra)
        itemSizer.Add(txt)
        itemSizer.Add(self.eraChoice)
        self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
        self.Bind(wx.EVT_CHOICE, self.OnGameChoice, self.eraChoice)
        # Create label/control pairs for speed
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_PITBOSS_SPEED", ())))
        self.speedChoice = wx.Choice(self, -1, (-1, -1), choices=speedList)
        self.speedChoice.SetSelection(gameData.iSpeed)
        itemSizer.Add(txt)
        itemSizer.Add(self.speedChoice)
        self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
        self.Bind(wx.EVT_CHOICE, self.OnGameChoice, self.speedChoice)
        # Create label/control pairs for custom map options
        self.buildCustomMapOptions(gameData.getMapName())
        self.optionsSizer.Add(self.dropDownSizer, 0, wx.RIGHT, 10)
        # Create the multiplayer option column
        centerSizer = wx.BoxSizer(wx.VERTICAL)
        mpOptionsBorder = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_MPOPTIONS", ()))))
        mpOptionsSizer = wx.StaticBoxSizer(mpOptionsBorder, wx.VERTICAL)
        # Create and add Multiplayer option checkboxes
        # (control IDs offset by 1000 to distinguish them in OnOptionChoice)
        rowNum = 0
        for rowNum in range(PB.getNumMPOptions()):
            mpCheckBox = wx.CheckBox(self, (rowNum+1000), (PB.getMPOptionDescAt(rowNum)))
            mpCheckBox.SetValue(gameData.getMPOptionAt(rowNum))
            mpOptionsSizer.Add(mpCheckBox, 0, wx.TOP, 5)
            self.mpOptionArray.append(mpCheckBox)
            self.Bind(wx.EVT_CHECKBOX, self.OnOptionChoice, mpCheckBox)
        # Entry box to set turn timer time
        timerOutputSizer = wx.BoxSizer(wx.HORIZONTAL)
        timerPreText = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_TURNTIMER_A", ()))
        self.turnTimerEdit = wx.TextCtrl(self, -1, str(gameData.iTurnTime), size=(30, -1))
        timerPostText = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_TURNTIMER_B", ()))
        timerOutputSizer.Add(timerPreText, 0, wx.TOP, 5)
        timerOutputSizer.Add(self.turnTimerEdit, 0, wx.TOP, 5)
        timerOutputSizer.Add(timerPostText, 0, wx.TOP, 5)
        self.Bind(wx.EVT_TEXT, self.OnTurnTimeEntered, self.turnTimerEdit)
        mpOptionsSizer.Add(timerOutputSizer, 0, wx.ALL, 5)
        # Entry box for game turn limit
        maxTurnsSizer = wx.BoxSizer(wx.HORIZONTAL)
        maxTurnsText = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_MAX_TURN", ()))
        self.maxTurnsEdit = wx.TextCtrl(self, -1, str(gameData.iMaxTurns), size=(30, -1))
        maxTurnsSizer.Add(maxTurnsText, 0, wx.TOP, 5)
        maxTurnsSizer.Add(self.maxTurnsEdit, 0, wx.TOP, 5)
        self.Bind(wx.EVT_TEXT, self.OnMaxTurnsEntered, self.maxTurnsEdit)
        mpOptionsSizer.Add(maxTurnsSizer, 0, wx.ALL, 5)
        # Entry box for city elimination limit
        cityEliminationSizer = wx.BoxSizer(wx.HORIZONTAL)
        cityEliminationText = wx.StaticText(self, -1, localText.getText("TXT_KEY_PITBOSS_CITY_ELIMINATION", ()))
        self.cityEliminationEdit = wx.TextCtrl(self, -1, str(gameData.iCityElimination), size=(30, -1))
        cityEliminationSizer.Add(cityEliminationText, 0, wx.TOP, 5)
        cityEliminationSizer.Add(self.cityEliminationEdit, 0, wx.TOP, 5)
        self.Bind(wx.EVT_TEXT, self.OnCityEliminationEntered, self.cityEliminationEdit)
        mpOptionsSizer.Add(cityEliminationSizer, 0, wx.ALL, 5)
        centerSizer.Add(mpOptionsSizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.LEFT | wx.RIGHT | wx.BOTTOM, 5)
        victoriesBorder = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_VICTORIES", ()))))
        victoriesSizer = wx.StaticBoxSizer(victoriesBorder, wx.VERTICAL)
        # Create and add Victory option checkboxes
        # (control IDs offset by 2000 to distinguish them in OnOptionChoice)
        rowNum = 0
        for rowNum in range(PB.getNumVictories()):
            victoryCheckBox = wx.CheckBox(self, (rowNum+2000), (PB.getVictoryDescAt(rowNum)))
            victoryCheckBox.SetValue(gameData.getVictory(rowNum))
            victoriesSizer.Add(victoryCheckBox, 0, wx.TOP, 5)
            self.victoriesArray.append(victoryCheckBox)
            self.Bind(wx.EVT_CHECKBOX, self.OnOptionChoice, victoryCheckBox)
        centerSizer.Add(victoriesSizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
        # Entry box for admin password
        itemSizer = wx.BoxSizer(wx.VERTICAL)
        txt = wx.StaticText(self, -1, (localText.getText("TXT_KEY_POPUP_ADMIN_PASSWORD", ())))
        self.adminPasswordEdit = wx.TextCtrl(self, -1, "", size=(100, -1))
        itemSizer.Add(txt)
        itemSizer.Add(self.adminPasswordEdit)
        mpOptionsSizer.Add(itemSizer, 0, wx.TOP, 5)
        self.Bind(wx.EVT_TEXT, self.OnAdminPasswordEntered, self.adminPasswordEdit)
        self.optionsSizer.Add(centerSizer, 0, wx.ALIGN_CENTER_HORIZONTAL)
        # Create the CheckBox side
        optionsBorder1 = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_OPTIONS", ()))))
        optionsBorder2 = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_OPTIONS", ()))))
        optionsBorder3 = wx.StaticBox(self, -1, ((localText.getText("TXT_KEY_PITBOSS_GAME_OPTIONS", ()))))
        checkBoxSizer1 = wx.StaticBoxSizer(optionsBorder1, wx.VERTICAL)
        checkBoxSizer2 = wx.StaticBoxSizer(optionsBorder2, wx.VERTICAL)
        checkBoxSizer3 = wx.StaticBoxSizer(optionsBorder3, wx.VERTICAL)
        # Create and add the Options checkboxes, split over three columns
        import math
        rowNum1 = math.ceil(PB.getNumOptions()/3.0)
        rowNum2 = 2 * rowNum1
        for rowNum in range(PB.getNumOptions()):
            checkBox = wx.CheckBox(self, rowNum, (PB.getOptionDescAt(rowNum)))
            checkBox.SetValue(gameData.getOptionAt(rowNum))
            if rowNum < rowNum1:
                checkBoxSizer1.Add(checkBox, 0, wx.TOP, 5)
            elif rowNum < rowNum2:
                checkBoxSizer2.Add(checkBox, 0, wx.TOP, 5)
            else:
                checkBoxSizer3.Add(checkBox, 0, wx.TOP, 5)
            self.optionArray.append(checkBox)
            self.Bind(wx.EVT_CHECKBOX, self.OnOptionChoice, checkBox)
        self.optionsSizer.Add(checkBoxSizer1, 0, wx.LEFT, 10)
        self.optionsSizer.Add(checkBoxSizer2, 0, wx.LEFT, 10)
        self.optionsSizer.Add(checkBoxSizer3, 0, wx.LEFT, 10)
        # Entry box for number of advanced start points
        advancedStartPointsSizer = wx.BoxSizer(wx.HORIZONTAL)
        advancedStartPointsText = wx.StaticText(self, -1, localText.getText("TXT_KEY_ADVANCED_START_POINTS", ()))
        self.advancedStartPointsEdit = wx.TextCtrl(self, -1, str(gameData.iAdvancedStartPoints), size=(50, -1))
        advancedStartPointsSizer.Add(advancedStartPointsText, 0, wx.TOP, 5)
        advancedStartPointsSizer.Add(self.advancedStartPointsEdit, 0, wx.TOP, 5)
        self.Bind(wx.EVT_TEXT, self.OnAdvancedStartPointsEntered, self.advancedStartPointsEdit)
        mpOptionsSizer.Add(advancedStartPointsSizer, 0, wx.ALL, 5)
        # Add our options box to the page
        self.pageSizer.Add(self.optionsSizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
        # Slot status - choices are static
        slotStatusList = [localText.getText("TXT_KEY_PITBOSS_HUMAN", ()), localText.getText("TXT_KEY_PITBOSS_COMPUTER", ()), localText.getText("TXT_KEY_PITBOSS_CLOSED", ())]
        # Civilizations - get from app (entry 0 is the "random" choice)
        civList = []
        civList.append(localText.getText("TXT_KEY_PITBOSS_RANDOM", ()))
        rowNum = 0
        for rowNum in range(PB.getNumCivs()):
            civList.append((PB.getCivAt(rowNum)))
        leaderList = [localText.getText("TXT_KEY_PITBOSS_RANDOM", ())]
        teamList = []
        rowNum = 0
        for rowNum in range(gc.getMAX_CIV_PLAYERS()):
            teamList.append(str(rowNum+1))
        # Handicaps - get from app
        diffList = []
        rowNum = 0
        for rowNum in range(PB.getNumHandicaps()):
            diffList.append((PB.getHandicapAt(rowNum)))
        #playerPanel = wx.lib.scrolledpanel.ScrolledPanel(self, -1, size=(425, 300), style = wx.SUNKEN_BORDER)
        playerPanel = wx.lib.scrolledpanel.ScrolledPanel(self, -1, size=(800, 300), style=wx.SUNKEN_BORDER)
        panelSizer = wx.BoxSizer(wx.VERTICAL)
        # Create a row - enough for the max players in a Pitboss game
        rowNum = 0
        for rowNum in range(gc.getMAX_CIV_PLAYERS()):
            # Create the border box
            border = wx.StaticBox(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_PLAYER", (rowNum+1, ))), (0, (rowNum*30)))
            # Create the layout mgr
            rowSizer = wx.StaticBoxSizer(border, wx.HORIZONTAL)
            # Get the info struct
            playerData = PB.getPlayerSetupData(rowNum)
            # Slot status dropdown
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_WHO", ())))
            dropDown = wx.Choice(playerPanel, rowNum, (-1, -1), choices=slotStatusList)
            dropDown.SetSelection(playerData.iWho)
            itemSizer.Add(txt)
            itemSizer.Add(dropDown)
            rowSizer.Add(itemSizer, 0, wx.TOP, 3)
            self.whoArray.append(dropDown)
            self.Bind(wx.EVT_CHOICE, self.OnPlayerChoice, dropDown)
            # Civ dropdown (+1 offsets past the leading "random" entry)
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_CIV", ())))
            dropDown = wx.Choice(playerPanel, rowNum, (-1, -1), choices=civList)
            dropDown.SetSelection(playerData.iCiv+1)
            itemSizer.Add(txt)
            itemSizer.Add(dropDown)
            rowSizer.Add(itemSizer, 0, wx.TOP, 3)
            self.civArray.append(dropDown)
            self.Bind(wx.EVT_CHOICE, self.OnPlayerChoice, dropDown)
            # Leader dropdown (+1 offsets past the leading "random" entry)
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_LEADER", ())))
            dropDown = wx.Choice(playerPanel, rowNum, (-1, -1), choices=leaderList)
            dropDown.SetSelection(playerData.iLeader+1)
            itemSizer.Add(txt)
            itemSizer.Add(dropDown)
            rowSizer.Add(itemSizer, 0, wx.TOP, 3)
            self.leaderArray.append(dropDown)
            self.Bind(wx.EVT_CHOICE, self.OnPlayerChoice, dropDown)
            # Team dropdown
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_TEAM", ())))
            dropDown = wx.Choice(playerPanel, rowNum, (-1, -1), choices=teamList)
            dropDown.SetSelection(playerData.iTeam)
            itemSizer.Add(txt)
            itemSizer.Add(dropDown)
            rowSizer.Add(itemSizer, 0, wx.TOP, 3)
            self.teamArray.append(dropDown)
            self.Bind(wx.EVT_CHOICE, self.OnPlayerChoice, dropDown)
            # Difficulty dropdown
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_DIFFICULTY", ())))
            dropDown = wx.Choice(playerPanel, rowNum, (-1, -1), choices=diffList)
            dropDown.SetSelection(playerData.iDifficulty)
            itemSizer.Add(txt)
            itemSizer.Add(dropDown)
            rowSizer.Add(itemSizer, 0, wx.TOP, 3)
            self.diffArray.append(dropDown)
            self.Bind(wx.EVT_CHOICE, self.OnPlayerChoice, dropDown)
            # Ready status
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(playerPanel, -1, (localText.getText("TXT_KEY_PITBOSS_STATUS", ())))
            statusTxt = wx.StaticText(playerPanel, rowNum, playerData.getStatusText())
            itemSizer.Add(txt)
            itemSizer.Add(statusTxt)
            rowSizer.Add(itemSizer, 0, wx.ALL, 5)
            self.statusArray.append(statusTxt)
            # Add row to page Sizer
            panelSizer.Add(rowSizer, 0, wx.ALL, 5)
        playerPanel.SetSizer(panelSizer)
        playerPanel.SetAutoLayout(1)
        playerPanel.SetupScrolling()
        self.pageSizer.Add(playerPanel, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
        self.leaderRefresh = False
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
        self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
        self.SetSizer(self.pageSizer)
    def enableButtons(self):
        """Enable both wizard navigation buttons (Next and Back) on the parent."""
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(True)
        self.myParent.FindWindowById(wx.ID_BACKWARD).Enable(True)
    def OnGameChoice(self, event):
        """wx.EVT_CHOICE handler for the game-setup dropdowns; pushes all params."""
        self.ChangeGameParam()
def ChangeGameParam(self):
maxTurnsValue = 0
cityEliminationValue = 0
advancedStartPointsValue = 0
turnTimerValue = 0
strValue = self.maxTurnsEdit.GetValue()
if (len(strValue) > 0):
maxTurnsValue = (int)(self.maxTurnsEdit.GetValue())
strValue = self.cityEliminationEdit.GetValue()
if (len(strValue) > 0):
cityEliminationValue = (int)(self.cityEliminationEdit.GetValue())
strValue = self.advancedStartPointsEdit.GetValue()
if (len(strValue) > 0):
advancedStartPointsValue = (int)(self.advancedStartPointsEdit.GetValue())
strValue = self.turnTimerEdit.GetValue()
if (len(strValue) > 0):
turnTimerValue = (int)(self.turnTimerEdit.GetValue())
PB.gameParamChanged(self.mapChoice.GetStringSelection(), self.sizeChoice.GetSelection(),
self.climateChoice.GetSelection(), self.seaLevelChoice.GetSelection(),
self.eraChoice.GetSelection(), self.speedChoice.GetSelection(), maxTurnsValue, cityEliminationValue,
advancedStartPointsValue, turnTimerValue, self.adminPasswordEdit.GetValue())
Webserver.PbTmpSettings["adminpw"] = self.adminPasswordEdit.GetValue()
    def OnCustomMapOptionChoice(self, event):
        """Forward a custom-map-option change to the PitBoss app.

        Widget IDs for these dropdowns were assigned as (optionID+1)*100 in
        buildCustomMapOptions, so the option index is recovered from the ID.
        """
        # Get the option ID
        # NOTE(review): relies on Python 2 integer division ("/" truncates).
        optionID = ((event.GetId()/100) - 1)
        PB.customMapOptionChanged(optionID, self.customMapOptionArray[optionID].GetSelection())
def IsNumericString(self, myStr):
for myChar in myStr:
if myChar not in string.digits:
return False
return True
    def OnMaxTurnsEntered(self, event):
        """Validate the max-turns edit box; non-numeric input pops an error
        dialog and clears the box, valid input is pushed to the app."""
        # Check to see if there is a turn string
        if ((self.maxTurnsEdit.GetValue() != "")):
            # There is, make sure it's a number
            if (not self.IsNumericString(self.maxTurnsEdit.GetValue())):
                # It's not - lay the smack down
                dlg = wx.MessageDialog(
                    self, localText.getText("TXT_KEY_PITBOSS_MAXTURN_ERROR_DESC", ()),
                    localText.getText("TXT_KEY_PITBOSS_MAXTURN_ERROR_TITLE", ()), wx.OK | wx.ICON_EXCLAMATION)
                # Show the modal dialog and get the response
                if dlg.ShowModal() == wx.ID_OK:
                    # Clear out the MaxTurns Edit box
                    self.maxTurnsEdit.SetValue("")
            else:
                # It's a number
                self.ChangeGameParam()
        else:
            # It's been cleared
            self.ChangeGameParam()
    def OnCityEliminationEntered(self, event):
        """Validate the city-elimination edit box; mirrors OnMaxTurnsEntered."""
        # Check to see if there is an elimination string
        if ((self.cityEliminationEdit.GetValue() != "")):
            # There is, make sure it's a number
            if (not self.IsNumericString(self.cityEliminationEdit.GetValue())):
                # It's not - lay the smack down
                dlg = wx.MessageDialog(
                    self, localText.getText("TXT_KEY_PITBOSS_CITYELIMINATION_ERROR_DESC", ()),
                    localText.getText("TXT_KEY_PITBOSS_CITYELIMINATION_ERROR_TITLE", ()), wx.OK | wx.ICON_EXCLAMATION)
                # Show the modal dialog and get the response
                if dlg.ShowModal() == wx.ID_OK:
                    # Clear out the city elimination edit box
                    self.cityEliminationEdit.SetValue("")
            else:
                # It's a number
                self.ChangeGameParam()
        else:
            # It's been cleared
            self.ChangeGameParam()
    def OnAdvancedStartPointsEntered(self, event):
        """Validate the advanced-start-points edit box; mirrors OnMaxTurnsEntered.

        NOTE(review): the error dialog reuses the CITYELIMINATION text keys -
        this looks copy-pasted; confirm whether a dedicated ADVANCED_START
        message key exists before changing the strings.
        """
        # Check to see if there is an string
        if ((self.advancedStartPointsEdit.GetValue() != "")):
            # There is, make sure it's a number
            if (not self.IsNumericString(self.advancedStartPointsEdit.GetValue())):
                # It's not - lay the smack down
                dlg = wx.MessageDialog(
                    self, localText.getText("TXT_KEY_PITBOSS_CITYELIMINATION_ERROR_DESC", ()),
                    localText.getText("TXT_KEY_PITBOSS_CITYELIMINATION_ERROR_TITLE", ()), wx.OK | wx.ICON_EXCLAMATION)
                # Show the modal dialog and get the response
                if dlg.ShowModal() == wx.ID_OK:
                    # Clear out the advanced start points edit box
                    self.advancedStartPointsEdit.SetValue("")
            else:
                # It's a number
                self.ChangeGameParam()
        else:
            # It's been cleared
            self.ChangeGameParam()
    def OnTurnTimeEntered(self, event):
        """Validate the turn-timer edit box; mirrors OnMaxTurnsEntered."""
        # Check to see if there is a time string
        if ((self.turnTimerEdit.GetValue() != "")):
            # There is, make sure it's a number
            if (not self.IsNumericString(self.turnTimerEdit.GetValue())):
                # It's not - lay the smack down
                dlg = wx.MessageDialog(
                    self, localText.getText("TXT_KEY_PITBOSS_TURNTIMER_ERROR_DESC", ()),
                    localText.getText("TXT_KEY_PITBOSS_TURNTIMER_ERROR_TITLE", ()), wx.OK | wx.ICON_EXCLAMATION)
                # Show the modal dialog and get the response
                if dlg.ShowModal() == wx.ID_OK:
                    # Clear out the TurnTimer Edit box
                    self.turnTimerEdit.SetValue("")
            else:
                # It's a number
                self.ChangeGameParam()
        else:
            # It's been cleared
            self.ChangeGameParam()
    def OnAdminPasswordEntered(self, event):
        """Push the (free-form) admin password along with the other params."""
        self.ChangeGameParam()
    def OnOptionChoice(self, event):
        """Dispatch a toggled checkbox to the app based on its widget-ID range.

        ID ranges encode the option category: >= 2000 victories, >= 1000 MP
        options, otherwise plain game options.
        """
        # Get the option ID
        optionID = event.GetId()
        # Values >= 2000 are victories
        if (optionID >= 2000):
            PB.victoriesChanged((optionID-2000), self.victoriesArray[(optionID-2000)].GetValue())
        # Values >= 1000 are MP options
        elif (optionID >= 1000):
            PB.mpOptionChanged((optionID-1000), self.mpOptionArray[(optionID-1000)].GetValue())
        else:
            PB.gameOptionChanged(optionID, self.optionArray[optionID].GetValue())
        # The turn-timer edit box is only meaningful while the timer option is on.
        bEnable = PB.getTurnTimer()
        self.turnTimerEdit.Enable(bEnable)
    def OnPlayerChoice(self, event):
        """Handle a change in any per-player dropdown (who/civ/team/difficulty).

        The widget ID doubles as the player row index. All of the row's
        current selections are forwarded to the app in a single call.
        """
        # Get the row for the player modified
        rowNum = event.GetId()
        # See if the slot status is valid
        if (bScenario and not PB.getNoPlayersScenario()):
            if (PB.getWho(rowNum) != self.whoArray[rowNum].GetSelection()):
                # Closed status is not permitted - change to AI
                # (selection index 2 = Closed, 1 = AI in the who dropdown)
                if (self.whoArray[rowNum].GetSelection() == 2):
                    self.whoArray[rowNum].SetSelection(1)
        # See if we need to update the leader box
        # (civ dropdown index is civ+1 because index 0 is "Random")
        if (not self.leaderRefresh):
            self.leaderRefresh = (PB.getCiv(rowNum) != (self.civArray[rowNum].GetSelection()-1))
        PB.playerParamChanged(rowNum, self.whoArray[rowNum].GetSelection(), self.civArray[rowNum].GetSelection()-1, self.teamArray[rowNum].GetSelection(),
            self.diffArray[rowNum].GetSelection(), PB.getGlobalLeaderIndex(self.civArray[rowNum].GetSelection()-1, self.leaderArray[rowNum].GetSelection()-1))
    def OnPageChanging(self, event):
        """Wizard page-changing hook: tear down network state when going back."""
        # Check direction
        if (not event.GetDirection()):
            # We are trying to move backward - reset the network resources
            PB.reset()
    def OnPageChanged(self, event):
        """Wizard page-changed hook: refresh this page and mark it current."""
        global curPage
        # Determine what buttons should be enabled
        self.enableButtons()
        self.setDefaults()
        # We are the current page
        curPage = self
    def setDefaults(self):
        """Populate every widget on the staging page from the app's current
        game-setup data, enabling/disabling widgets according to whether a
        saved game (bSaved) or a scenario (bScenario) constrains them."""
        # Display the current initialization information
        global bSaved
        global bScenario
        # Get game data first
        PB.resetAdvancedStartPoints()
        gameData = PB.getGameSetupData()
        self.refreshCustomMapOptions(gameData.getMapName())
        # Set the selections currently in our init structure
        if (self.mapChoice.FindString(gameData.getMapName()) == wx.NOT_FOUND):
            self.mapChoice.Append(gameData.getMapName())
        self.mapChoice.SetStringSelection(gameData.getMapName())
        self.mapChoice.Enable(not bSaved and not bScenario)
        self.sizeChoice.SetSelection(gameData.iSize)
        self.sizeChoice.Enable(not bSaved and not bScenario)
        self.climateChoice.SetSelection(gameData.iClimate)
        self.climateChoice.Enable(not bSaved and not bScenario)
        self.seaLevelChoice.SetSelection(gameData.iSeaLevel)
        self.seaLevelChoice.Enable(not bSaved and not bScenario)
        self.eraChoice.SetSelection(gameData.iEra)
        self.eraChoice.Enable(not bSaved and not bScenario)
        self.speedChoice.SetSelection(gameData.iSpeed)
        self.speedChoice.Enable(not bSaved and not PB.forceSpeed())
        self.maxTurnsEdit.SetValue(str(gameData.iMaxTurns))
        self.maxTurnsEdit.Enable(not bSaved and not PB.forceMaxTurns())
        self.cityEliminationEdit.SetValue(str(gameData.iCityElimination))
        self.cityEliminationEdit.Enable(not bSaved and not PB.forceCityElimination())
        self.advancedStartPointsEdit.SetValue(str(gameData.iAdvancedStartPoints))
        self.advancedStartPointsEdit.Enable(not bSaved and not PB.forceAdvancedStart())
        self.turnTimerEdit.SetValue(str(gameData.iTurnTime))
        if (not bSaved):
            bEnable = PB.getTurnTimer()
            self.turnTimerEdit.Enable(bEnable)
        else:
            self.turnTimerEdit.Disable()
        # Set selections of map options
        optionNum = 0
        for optionNum in range(PB.getNumCustomMapOptions(gameData.getMapName())):
            self.customMapOptionArray[optionNum].SetSelection(gameData.getCustomMapOption(optionNum))
            self.customMapOptionArray[optionNum].Enable(not bSaved and not bScenario)
        # set the mp options selection
        rowNum = 0
        for rowNum in range(PB.getNumMPOptions()):
            self.mpOptionArray[rowNum].SetValue(gameData.getMPOptionAt(rowNum))
            self.mpOptionArray[rowNum].Enable(not bSaved)
        # set the victories selected
        rowNum = 0
        for rowNum in range(PB.getNumVictories()):
            self.victoriesArray[rowNum].SetValue(gameData.getVictory(rowNum))
            self.victoriesArray[rowNum].Enable(not bSaved and not PB.forceVictories() and not PB.isPermanentVictory(rowNum))
        # Set the options selected
        rowNum = 0
        for rowNum in range(PB.getNumOptions()):
            self.optionArray[rowNum].SetValue(gameData.getOptionAt(rowNum))
            self.optionArray[rowNum].Enable(not bSaved and not PB.forceOptions() and PB.isOptionValid(rowNum))
        # Have the app suggest number of players based on map size
        PB.suggestPlayerSetup()
        rowNum = 0
        for rowNum in range(gc.getMAX_CIV_PLAYERS()):
            # Get the player data
            playerData = PB.getPlayerSetupData(rowNum)
            # We may need to add/remove items from who box
            self.refreshWhoBox(rowNum, playerData.iWho)
            self.whoArray[rowNum].SetSelection(playerData.iWho)
            if (playerData.iWho == 1): # AI
                self.whoArray[rowNum].Enable(not bSaved and PB.isPlayableCiv(rowNum))
            # Civ choices are static inside the instance
            # (dropdown index 0 is "Random", so display index = iCiv + 1)
            civChoice = playerData.iCiv+1
            self.civArray[rowNum].SetSelection(civChoice)
            self.civArray[rowNum].Enable(not bSaved and (not bScenario or PB.getNoPlayersScenario()))
            # We may need to add/remove items from the leader box
            self.refreshLeaderBox(rowNum, playerData.iCiv)
            self.leaderRefresh = False
            self.leaderArray[rowNum].SetSelection(PB.getCivLeaderIndex(civChoice-1, playerData.iLeader)+1)
            self.leaderArray[rowNum].Enable(not bSaved and (not bScenario or PB.getNoPlayersScenario()))
            # Team choices are static
            self.teamArray[rowNum].SetSelection(playerData.iTeam)
            self.teamArray[rowNum].Enable(not bSaved and (not bScenario or PB.getNoPlayersScenario()))
            # Difficulty choices are static
            self.diffArray[rowNum].SetSelection(playerData.iDifficulty)
            self.diffArray[rowNum].Enable(not bSaved and not PB.forceDifficulty())
            # Status is static
            self.statusArray[rowNum].SetLabel(playerData.getStatusText())
    def refreshRow(self, iRow):
        """Refresh one player row from app state and gate the launch button.

        The Next/Finish button is enabled only when every human-occupied slot
        reports ready and no init is pending (skipped entirely for loads).
        """
        global bSaved
        # Disable finish button if all players not ready to start
        bAllReady = True
        # Don't wait for ready's if we're loading
        if (not bSaved):
            index = 0
            for index in range(gc.getMAX_CIV_PLAYERS()):
                if (PB.getWho(index) == 3): # If a row is taken by a human
                    if (PB.getReady(index) == False): # If this human is not ready for the event to begin
                        # Don't allow a launch
                        bAllReady = False
                        break
            if (bAllReady and PB.isPendingInit()):
                bAllReady = False
        self.myParent.FindWindowById(wx.ID_FORWARD).Enable(bAllReady)
        # Get information from the app for this row
        playerData = PB.getPlayerSetupData(iRow)
        # Refresh the choices in this slot
        self.refreshWhoBox(iRow, playerData.iWho)
        self.whoArray[iRow].SetSelection(playerData.iWho)
        # Get the Civ and see if we should refresh the list of leaders
        dropDown = self.civArray[iRow]
        civChoice = playerData.iCiv+1
        if (not self.leaderRefresh):
            self.leaderRefresh = (civChoice != dropDown.GetSelection())
        dropDown.SetSelection(civChoice)
        if (self.leaderRefresh):
            self.refreshLeaderBox(iRow, playerData.iCiv)
            self.leaderRefresh = False
        # Get the Leader
        dropDown = self.leaderArray[iRow]
        dropDown.SetSelection(PB.getCivLeaderIndex(civChoice-1, playerData.iLeader)+1)
        # Get the Team
        dropDown = self.teamArray[iRow]
        dropDown.SetSelection(playerData.iTeam)
        # Get the Difficulty
        dropDown = self.diffArray[iRow]
        dropDown.SetSelection(playerData.iDifficulty)
        # Modify Status
        self.statusArray[iRow].SetLabel(playerData.getStatusText())
    def refreshWhoBox(self, iRow, iWho):
        """Keep the who-dropdown's 4th entry (the human player's name) in sync.

        The first three entries are the fixed slot states; entry index 3 is
        only present while the slot is taken by a human (iWho >= 3).
        """
        # Add or remove choices depending on the state and the change
        dropDown = self.whoArray[iRow]
        if (iWho < 3): # Status changing to non-taken state
            # Remove the player name from the drop down if it is there
            if (dropDown.GetCount() > 3):
                dropDown.Delete(3)
        else: # Slot taken!
            if (dropDown.GetCount() == 3):
                # Add and display the player name
                dropDown.Append((PB.getName(iRow)))
            else:
                # Set the current player name with the new one
                dropDown.SetString(3, (PB.getName(iRow)))
    def refreshLeaderBox(self, iRow, iCiv):
        """Repopulate the leader dropdown for a row after its civ changed.

        Entry 0 is always "Random"; the civ's leaders follow. iCiv == -1
        (Random civ) lists no leaders.
        """
        # Need to reset the leader choices - first clear the list
        dropDown = self.leaderArray[iRow]
        dropDown.Clear()
        # Give the Random choice
        dropDown.Append((localText.getText("TXT_KEY_PITBOSS_RANDOM", ())))
        civChoice = iCiv+1
        if (civChoice != 0):
            # If there are leaders to list, list them
            i = 0
            iNumLeaders = PB.getNumLeaders(civChoice-1)
            for i in range(iNumLeaders):
                dropDown.Append((PB.getCivLeaderAt(civChoice-1, i)))
        dropDown.SetSelection(0)
def refreshCustomMapOptions(self, szMapName):
# Clear the widgets from the custom option area
i = 0
for i in range(len(self.customItemSizerArray)):
self.Unbind(wx.EVT_CHOICE, self.customMapOptionArray[i])
currentSizer = self.customItemSizerArray[i]
success = currentSizer.Remove(1) # dropDown
success = currentSizer.Remove(0) # txt
success = self.dropDownSizer.Remove(currentSizer)
self.customMapOptionArray[i].Destroy()
self.customMapTextArray[i].Destroy()
self.buildCustomMapOptions(szMapName)
# Now rebuild the sizers
self.dropDownSizer.Layout()
self.optionsSizer.Layout()
self.pageSizer.Layout()
self.Layout()
    def refreshAdvancedStartPoints(self, iPoints):
        """Display an app-pushed advanced-start point total in the edit box."""
        self.advancedStartPointsEdit.SetValue(str(iPoints))
    def buildCustomMapOptions(self, szMapName):
        """Create the label/dropdown pairs for the map's custom options.

        Widget IDs are assigned as (optionNum+1)*100 so that
        OnCustomMapOptionChoice can recover the option index.

        NOTE(review): the value lists are gathered against
        gameData.getMapName() while the widgets are built against szMapName;
        this only lines up when the two names agree - confirm with callers.
        """
        gameData = PB.getGameSetupData()
        self.customItemSizerArray = []
        self.customMapTextArray = []
        self.customMapOptionArray = []
        # Create label/control pairs for custom map option
        customMapOptionsList = []
        optionNum = 0
        for optionNum in range(PB.getNumCustomMapOptions(gameData.getMapName())):
            customMapOptionValuesList = []
            rowNum = 0
            for rowNum in range(PB.getNumCustomMapOptionValues(optionNum, gameData.getMapName())):
                customMapOptionValuesList.append(PB.getCustomMapOptionDescAt(optionNum, rowNum, gameData.getMapName()))
            customMapOptionsList.append(customMapOptionValuesList[:])
        optionNum = 0
        for optionNum in range(PB.getNumCustomMapOptions(szMapName)):
            itemSizer = wx.BoxSizer(wx.VERTICAL)
            txt = wx.StaticText(self, -1, PB.getCustomMapOptionName(optionNum, szMapName))
            optionDropDown = wx.Choice(self, ((optionNum+1)*100), (-1, -1), choices=customMapOptionsList[optionNum])
            optionDropDown.SetSelection(gameData.getCustomMapOption(optionNum))
            itemSizer.Add(txt)
            itemSizer.Add(optionDropDown)
            self.customItemSizerArray.append(itemSizer)
            self.customMapTextArray.append(txt)
            self.customMapOptionArray.append(optionDropDown)
            self.dropDownSizer.Add(itemSizer, 0, wx.TOP, 3)
            self.Bind(wx.EVT_CHOICE, self.OnCustomMapOptionChoice, self.customMapOptionArray[optionNum])
#
# Progress bar dialog
#
class ProgressDialog(wx.Dialog):
    """Stay-on-top dialog showing patch-download progress with a cancel button.

    The wx.Gauge is created lazily in setTotal once the real byte total is
    known; until then only the cancel button is visible.
    """
    def __init__(self, parent):
        global curPage
        wx.Dialog.__init__(self, curPage, -1, localText.getText("TXT_KEY_PITBOSS_PATCH_PROGRESS_TITLE", ()), wx.DefaultPosition, wx.DefaultSize, wx.STAY_ON_TOP)
        # parent here is the wx.App (StartupIFace), not the window parent.
        self.myParent = parent
        self.iValue = 0
        self.iTotal = 100 # Arbitrary Value until it's actually set
        # Place the progress bar
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.progress = None
        progressSizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(progressSizer, 0, wx.ALL, 5)
        # Add a cancel button
        cancelButton = wx.Button(self, -1, localText.getText("TXT_KEY_SCREEN_CANCEL", ()))
        cancelButton.SetHelpText(localText.getText("TXT_KEY_CANCEL_PATCH_DOWNLOAD", ()))
        self.Bind(wx.EVT_BUTTON, self.OnCancelDownload, cancelButton)
        self.sizer.Add(cancelButton, 0, wx.ALL, 5)
        self.SetSizer(self.sizer)

    def setValue(self, iValue):
        """Update the bytes-received value and the gauge (if created)."""
        if (iValue > 0):
            self.iValue = iValue
            if (self.progress is not None):
                self.progress.SetValue(self.iValue)

    def setTotal(self, iTotal):
        """Record the total byte count, creating the gauge on first real value."""
        if (iTotal != self.iTotal):
            if (iTotal > 0):
                self.iTotal = iTotal
                if (self.progress is None):
                    self.progress = wx.Gauge(self, self.iValue, self.iTotal)
                    self.sizer.Add(self.progress, 0, wx.ALL, 5)

    def OnCancelDownload(self, event):
        "Cancel download handler"
        # Tell the app
        self.myParent.cancelDownload()
        # Return to our caller
        if (self.IsModal()):
            self.EndModal(wx.ID_CANCEL)
        else:
            self.Show(False)
        return wx.ID_CANCEL
#
# main app class
#
class StartupIFace(wx.App):
    """Main wx application: builds the PitBoss setup wizard, drives the
    network message pump via a timer, and relays app callbacks (patching,
    row refreshes) to the current wizard page."""
    def OnInit(self):
        global curPage
        global PBMOD_AUTOSTART
        "Create the Pitboss Setup Wizard"
        self.wizard = wx.wizard.Wizard(None, -1, (localText.getText("TXT_KEY_PITBOSS_TITLE", ())))
        # Create each wizard page
        self.modSelect = ModSelectPage(self.wizard)
        self.smtpLogin = SMTPLoginPage(self.wizard)
        self.netSelect = NetSelectPage(self.wizard)
        self.login = LoginPage(self.wizard)
        self.loadSelect = LoadSelectPage(self.wizard)
        self.scenarioSelect = ScenarioSelectPage(self.wizard)
        self.staging = StagingPage(self.wizard)
        # Chain the pages. Note the asymmetry: loadSelect/staging point their
        # "previous" at earlier pages than their forward links imply.
        self.modSelect.SetNext(self.smtpLogin)
        self.smtpLogin.SetPrev(self.modSelect)
        self.smtpLogin.SetNext(self.netSelect)
        self.netSelect.SetPrev(self.smtpLogin)
        self.netSelect.SetNext(self.login)
        self.login.SetPrev(self.netSelect)
        self.login.SetNext(self.loadSelect)
        self.loadSelect.SetPrev(self.netSelect)
        self.loadSelect.SetNext(self.scenarioSelect)
        self.scenarioSelect.SetPrev(self.loadSelect)
        self.scenarioSelect.SetNext(self.staging)
        self.staging.SetPrev(self.loadSelect)
        self.progressDlg = None
        if not PBMOD_AUTOSTART:
            curPage = self.modSelect
            #curPage = self.staging
            self.wizard.FitToPage(curPage)
        # Create a timer callback that will handle our updates
        timerID = wx.NewId()
        self.updateTimer = wx.Timer(self, timerID)
        self.Bind(wx.EVT_TIMER, self.OnTimedUpdate, id=timerID)
        self.updateTimer.Start(250)
        # Force automatic start
        if PBMOD_AUTOSTART:
            # This prevents the wizard page creation in the startup method
            curPage = None
            # Use predefined values to start up the server
            # without wizard pages
            global bSaved
            global bPublic
            global bScenario
            adminPwd = str(PbSettings.get("save", {}).get("adminpw", ""))
            folderIndex = int(PbSettings.get("save", {}).get("folderIndex", 0))
            filename = str(PbSettings["save"]["filename"])
            (iResult, filepath) = loadSavegame(filename, folderIndex, adminPwd)
            if iResult == 0:
                PB.setLoadFileName(filepath)
                if (not PB.host(bPublic, bScenario)):
                    PB.reset()
                else:
                    bSaved = True
                # if (self.wizard.RunWizard(curPage) and not PB.getDone()):
                if True:
                    PB.getDone()
                    self.updateTimer.Stop()
                    PB.launch()
            else:
                # Loading of savegame failed. Thus, PBMOD_AUTOSTART was not possible
                # Missing error message for user here...
                self.updateTimer.Stop()
                PB.quit()
        return True

    def startWizard(self):
        """Run the wizard modally; launch on completion, quit on cancel."""
        global curPage
        # Try starting the wizard
        if curPage is None:
            # curPage is None if the game was automatically loaded
            pass
        else:
            if (self.wizard.RunWizard(curPage) and not PB.getDone()):
                # launch game here
                self.updateTimer.Stop()
                PB.launch()
                return True
            else:
                # user cancelled...
                self.updateTimer.Stop()
                PB.quit()
                return False

    def OnTimedUpdate(self, event):
        """Timer tick (250 ms): pump pending network messages through the app."""
        # Handle received net messages
        PB.handleMessages()

    def displayMessageBox(self, title, desc):
        """Log a message to the PitBoss console (popup variant is disabled)."""
        #global msgBox
        #msgBox = wx.MessageDialog( self, desc, title, wx.OK )
        # msgBox.Show(True)
        outMsg = title + ":\n" + desc
        PB.consoleOut(outMsg)

    def patchAvailable(self, patchName, patchUrl):
        """App callback: offer a patch, but only on pages that can handle it."""
        global curPage
        # Save info and display a popup to the user
        if ((curPage == self.login) or (curPage == self.loadSelect)):
            # Show the popup
            curPage.patchAvailable(patchName, patchUrl)

    def patchProgress(self, bytesRecvd, bytesTotal):
        """App callback: update (or lazily create) the download progress dialog."""
        global bPatchConfirmed
        if (not bPatchConfirmed):
            # Display our progress
            if (self.progressDlg is None):
                # Need to create the dialog
                self.progressDlg = ProgressDialog(self)
                self.progressDlg.Show(True)
            self.progressDlg.setTotal(bytesTotal)
            self.progressDlg.setValue(bytesRecvd)

    def cancelDownload(self):
        """Abort the patch download and dispose of the progress dialog."""
        global bPatchConfirmed
        bPatchConfirmed = True
        # get rid of the dialog
        if (self.progressDlg is not None):
            self.progressDlg.Show(False)
            self.progressDlg = None
        # Tell the application
        PB.cancelPatchDownload()

    def patchDownloadComplete(self, bSuccess):
        """App callback: dismiss the progress dialog and report the outcome."""
        # Download complete - check if it was successful
        global curPage
        global bPatchConfirmed
        global bPatchOK
        # get rid of the dialog
        if (self.progressDlg is not None):
            self.progressDlg.Show(False)
            self.progressDlg = None
        if (bSuccess):
            curPage.patchComplete()
        else:
            bPatchOK = False
            msg = wx.MessageBox(localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_DESC", ()), localText.getText("TXT_KEY_PITBOSS_PATCH_DOWNLOAD_ERROR_TITLE", ()), wx.ICON_ERROR)
            bPatchConfirmed = True
            curPage.enableButtons()

    def upToDate(self):
        """App callback: no patch needed; unblock the relevant wizard page."""
        global curPage
        global bPatchConfirmed
        global bPatchOK
        bPatchConfirmed = True
        bPatchOK = True
        if ((curPage == self.login) or (curPage == self.loadSelect)):
            curPage.enableButtons()

    def refreshRow(self, iRow):
        """App callback: refresh a player row when the staging page is active."""
        global curPage
        # Get the latest data from the app and display in the view
        if (curPage == self.staging):
            # In the staging room, update the row
            curPage.refreshRow(iRow)

    def refreshCustomMapOptions(self, szMapName):
        """App callback: rebuild map options when the staging page is active."""
        global curPage
        # Refresh the page if we are in the staging window
        if (curPage == self.staging):
            # Update the custom map options in the staging room
            curPage.refreshCustomMapOptions(szMapName)

    def refreshAdvancedStartPoints(self, iPoints):
        """App callback: update advanced-start points on the staging page."""
        global curPage
        # Refresh the page if we are in the staging window
        if (curPage == self.staging):
            # Update the advanced start points in the staging room
            curPage.refreshAdvancedStartPoints(iPoints)
| gpl-2.0 |
saideepchandg/oracle-r12-accounting | lib/django/db/models/fields/subclassing.py | 265 | 2017 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the metaclass for your Field subclass, implement
to_python() and the other necessary methods and everything will work
seamlessly.
"""
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
class SubfieldBase(type):
    """
    A metaclass for custom Field subclasses. This ensures the model's attribute
    has the descriptor protocol attached to it.

    Deprecated: emits RemovedInDjango110Warning at class-definition time;
    Field.from_db_value is the replacement.
    """
    def __new__(cls, name, bases, attrs):
        warnings.warn("SubfieldBase has been deprecated. Use Field.from_db_value instead.",
            RemovedInDjango110Warning)
        new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
        # Wrap (or synthesize) contribute_to_class so that a Creator descriptor
        # is installed on the model for this field's attribute.
        new_class.contribute_to_class = make_contrib(
            new_class, attrs.get('contribute_to_class')
        )
        return new_class
class Creator(object):
    """
    Descriptor installed on the model class so that every assignment to the
    field's attribute is routed through the field's to_python() conversion.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself; instance access
        # is a plain dict lookup (the value was converted on assignment).
        return self if obj is None else obj.__dict__[self.field.name]

    def __set__(self, obj, value):
        # Convert eagerly so reads never pay the conversion cost.
        obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
    """
    Returns a suitable contribute_to_class() method for the Field subclass.
    If 'func' is passed in, it is the existing contribute_to_class() method on
    the subclass and it is called before anything else. It is assumed in this
    case that the existing contribute_to_class() calls all the necessary
    superclass methods.
    """
    def contribute_to_class(self, cls, name, **kwargs):
        if func:
            # The subclass supplied its own hook; trust it to chain to super.
            func(self, cls, name, **kwargs)
        else:
            # No user hook: call the field base class implementation directly.
            super(superclass, self).contribute_to_class(cls, name, **kwargs)
        # Either way, install the conversion descriptor on the model class.
        setattr(cls, self.name, Creator(self))
    return contribute_to_class
| bsd-3-clause |
abircse06/youtube-dl | youtube_dl/extractor/mofosex.py | 41 | 1678 | from __future__ import unicode_literals
import os
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
)
class MofosexIE(InfoExtractor):
    """youtube-dl extractor for mofosex.com video pages."""
    _VALID_URL = r'https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<id>[0-9]+)/.*?\.html)'
    _TEST = {
        'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
        'md5': '1b2eb47ac33cc75d4a80e3026b613c5a',
        'info_dict': {
            'id': '5018',
            'ext': 'mp4',
            'title': 'Japanese Teen Music Video',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # Normalize to the www. host regardless of how the URL was given.
        url = 'http://www.' + mobj.group('url')

        # The site gates content behind an age check; the cookie bypasses it.
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, 'title')
        # The media URL is URL-encoded inside a flashvars assignment.
        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, 'video_url'))
        path = compat_urllib_parse_urlparse(video_url).path
        extension = os.path.splitext(path)[1][1:]
        # The 6th path segment encodes the format, e.g. "mp4-480p" -> join
        # its first two underscore-separated parts with a dash.
        format = path.split('/')[5].split('_')[:2]
        format = "-".join(format)

        age_limit = self._rta_search(webpage)

        return {
            'id': video_id,
            'title': video_title,
            'url': video_url,
            'ext': extension,
            'format': format,
            'format_id': format,
            'age_limit': age_limit,
        }
| unlicense |
eckucukoglu/arm-linux-gnueabihf | lib/python2.7/test/test_file_eintr.py | 95 | 10480 | # Written to test interrupted system calls interfering with our many buffered
# IO implementations. http://bugs.python.org/issue12268
#
# This tests the '_io' module. Similar tests for Python 2.x's older
# default file I/O implementation exist within test_file2k.py.
#
# It was suggested that this code could be merged into test_io and the tests
# made to work using the same method as the existing signal tests in test_io.
# I was unable to get single process tests using alarm or setitimer that way
# to reproduce the EINTR problems. This process based test suite reproduces
# the problems prior to the issue12268 patch reliably on Linux and OSX.
# - gregory.p.smith
import os
import select
import signal
import subprocess
import sys
from test.test_support import run_unittest
import time
import unittest
# Test import all of the things we're about to try testing up front.
from _io import FileIO
@unittest.skipUnless(os.name == 'posix', 'tests requires a posix system.')
class TestFileIOSignalInterrupt(unittest.TestCase):
    """EINTR regression tests for _io.FileIO read methods (issue 12268).

    Each test spawns a reader subprocess, interrupts its blocking read with
    SIGINT, and verifies no data is lost and the signal handler ran.
    """
    def setUp(self):
        # Holds the reader subprocess so tearDown can reap it on failure.
        self._process = None

    def tearDown(self):
        # Best-effort cleanup: kill a still-running reader process.
        if self._process and self._process.poll() is None:
            try:
                self._process.kill()
            except OSError:
                pass

    def _generate_infile_setup_code(self):
        """Returns the infile = ... line of code for the reader process.
        Subclasses should override this to test different IO objects.
        """
        return ('import _io ;'
                'infile = _io.FileIO(sys.stdin.fileno(), "rb")')

    def fail_with_process_info(self, why, stdout=b'', stderr=b'',
                               communicate=True):
        """A common way to cleanup and fail with useful debug output.

        Kills the process if it is still running, collects remaining output
        and fails the test with an error message including the output.

        Args:
            why: Text to go after "Error from IO process" in the message.
            stdout, stderr: standard output and error from the process so
                far to include in the error message.
            communicate: bool, when True we call communicate() on the process
                after killing it to gather additional output.
        """
        if self._process.poll() is None:
            time.sleep(0.1)  # give it time to finish printing the error.
            try:
                self._process.terminate()  # Ensure it dies.
            except OSError:
                pass
        if communicate:
            stdout_end, stderr_end = self._process.communicate()
            stdout += stdout_end
            stderr += stderr_end
        self.fail('Error from IO process %s:\nSTDOUT:\n%sSTDERR:\n%s\n' %
                  (why, stdout.decode(), stderr.decode()))

    def _test_reading(self, data_to_write, read_and_verify_code):
        """Generic buffered read method test harness to validate EINTR behavior.

        Also validates that Python signal handlers are run during the read.

        Args:
            data_to_write: String to write to the child process for reading
                before sending it a signal, confirming the signal was handled,
                writing a final newline and closing the infile pipe.
            read_and_verify_code: Single "line" of code to read from a file
                object named 'infile' and validate the result. This will be
                executed as part of a python subprocess fed data_to_write.
        """
        infile_setup_code = self._generate_infile_setup_code()
        # Total pipe IO in this function is smaller than the minimum posix OS
        # pipe buffer size of 512 bytes. No writer should block.
        assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
        # Start a subprocess to call our read method while handling a signal.
        self._process = subprocess.Popen(
            [sys.executable, '-u', '-c',
             'import io, signal, sys ;'
             'signal.signal(signal.SIGINT, '
             'lambda s, f: sys.stderr.write("$\\n")) ;'
             + infile_setup_code + ' ;' +
             'sys.stderr.write("Worm Sign!\\n") ;'
             + read_and_verify_code + ' ;' +
             'infile.close()'
             ],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Wait for the signal handler to be installed.
        worm_sign = self._process.stderr.read(len(b'Worm Sign!\n'))
        if worm_sign != b'Worm Sign!\n':  # See also, Dune by Frank Herbert.
            self.fail_with_process_info('while awaiting a sign',
                                        stderr=worm_sign)
        self._process.stdin.write(data_to_write)
        signals_sent = 0
        rlist = []
        # We don't know when the read_and_verify_code in our child is actually
        # executing within the read system call we want to interrupt.  This
        # loop waits for a bit before sending the first signal to increase
        # the likelihood of that.  Implementations without correct EINTR
        # and signal handling usually fail this test.
        while not rlist:
            rlist, _, _ = select.select([self._process.stderr], (), (), 0.05)
            self._process.send_signal(signal.SIGINT)
            signals_sent += 1
            if signals_sent > 200:
                self._process.kill()
                self.fail('reader process failed to handle our signals.')
        # This assumes anything unexpected that writes to stderr will also
        # write a newline.  That is true of the traceback printing code.
        signal_line = self._process.stderr.readline()
        if signal_line != b'$\n':
            self.fail_with_process_info('while awaiting signal',
                                        stderr=signal_line)
        # We append a newline to our input so that a readline call can
        # end on its own before the EOF is seen and so that we're testing
        # the read call that was interrupted by a signal before the end of
        # the data stream has been reached.
        stdout, stderr = self._process.communicate(input=b'\n')
        if self._process.returncode:
            self.fail_with_process_info(
                'exited rc=%d' % self._process.returncode,
                stdout, stderr, communicate=False)
        # PASS!

    # String format for the read_and_verify_code used by read methods.
    _READING_CODE_TEMPLATE = (
        'got = infile.{read_method_name}() ;'
        'expected = {expected!r} ;'
        'assert got == expected, ('
        '"{read_method_name} returned wrong data.\\n"'
        '"got data %r\\nexpected %r" % (got, expected))'
    )

    def test_readline(self):
        """readline() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello, world!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='readline',
                expected=b'hello, world!\n'))

    def test_readlines(self):
        """readlines() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello\nworld!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='readlines',
                expected=[b'hello\n', b'world!\n']))

    def test_readall(self):
        """readall() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello\nworld!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='readall',
                expected=b'hello\nworld!\n'))
        # read() is the same thing as readall().
        self._test_reading(
            data_to_write=b'hello\nworld!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='read',
                expected=b'hello\nworld!\n'))
class TestBufferedIOSignalInterrupt(TestFileIOSignalInterrupt):
    """Same EINTR tests, run against _io.BufferedReader instead of FileIO."""
    def _generate_infile_setup_code(self):
        """Returns the infile = ... line of code to make a BufferedReader."""
        return ('infile = io.open(sys.stdin.fileno(), "rb") ;'
                'import _io ;assert isinstance(infile, _io.BufferedReader)')

    def test_readall(self):
        """BufferedReader.read() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello\nworld!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='read',
                expected=b'hello\nworld!\n'))
class TestTextIOSignalInterrupt(TestFileIOSignalInterrupt):
    """Same EINTR tests, run against _io.TextIOWrapper (str results,
    universal newlines)."""
    def _generate_infile_setup_code(self):
        """Returns the infile = ... line of code to make a TextIOWrapper."""
        return ('infile = io.open(sys.stdin.fileno(), "rt", newline=None) ;'
                'import _io ;assert isinstance(infile, _io.TextIOWrapper)')

    def test_readline(self):
        """readline() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello, world!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='readline',
                expected='hello, world!\n'))

    def test_readlines(self):
        """readlines() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello\r\nworld!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='readlines',
                expected=['hello\n', 'world!\n']))

    def test_readall(self):
        """read() must handle signals and not lose data."""
        self._test_reading(
            data_to_write=b'hello\nworld!',
            read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                read_method_name='read',
                expected="hello\nworld!\n"))
def test_main():
    # Collect every TestCase subclass defined at module level and run it;
    # run_unittest is the test.support helper imported earlier in the file.
    test_cases = [
        tc for tc in globals().values()
        if isinstance(tc, type) and issubclass(tc, unittest.TestCase)]
    run_unittest(*test_cases)
if __name__ == '__main__':
    test_main()
| gpl-2.0 |
mathpresso/adminpp | adminpp/builder.py | 2 | 2877 | from django.contrib import admin
class AdminBuilder:
    """Builds a concrete django ``ModelAdmin`` subclass from a declarative
    admin-model description.

    The admin model class is expected to provide an inner ``Meta`` class
    (with at least ``model``), plus ``get_fields()``, ``get_queryset()`` and
    ``predefined_meta_fields`` — TODO confirm against the AdminModel base
    class, which is not visible in this chunk.
    """
    def __init__(self, admin_model_class):
        # Instantiate the declarative description and remember its model.
        self.admin_model = admin_model_class()
        self.model = admin_model_class.Meta.model
    def construct_list_display(self, admin_class):
        """Inject one ``get_<field>`` callable per displayable field and
        register it in ``admin_class.list_display``."""
        admin_class.list_display = []
        for field in self.admin_model.get_fields():
            if field.list_display:
                # Create getter function that can be injected to admin class.
                # A fresh _getter class is defined on every loop iteration, so
                # the class-level attributes below bind the *current* field.
                class _getter(object):
                    def __init__(self, field):
                        self.field = field
                    # "_getter" class implements "__call__" method,
                    # so it can behave like a get_something(self, obj) in ModelAdmin
                    def __call__(self, obj):
                        value = self.field.get_value(obj)
                        return self.field.render(value)
                    # Add some nice attributes (evaluated at class-definition
                    # time, i.e. per iteration — intentional)
                    short_description = field.short_description
                    admin_order_field = field.admin_order_field
                # Append getter function to admin_class
                getter_attr_name = 'get_{}'.format(field.name)
                setattr(admin_class, getter_attr_name, _getter(field))
                # Register it to list_display
                admin_class.list_display.append(getter_attr_name)
    def construct_get_queuryset(self, admin_class):
        # NOTE(review): method name has a typo ("queuryset") but is part of
        # the public interface; renaming would break external callers.
        # https://github.com/django/django/blob/1.10.3/django/contrib/admin/options.py#L318
        def get_queryset(admin_self, request):
            # Main difference is that we get queryset from AdminModel instance, not from model's default manager
            qs = self.admin_model.get_queryset()
            # Others are same with django admin's default implementation
            ordering = admin_self.get_ordering(request)
            if ordering:
                qs = qs.order_by(*ordering)
            return qs
        admin_class.get_queryset = get_queryset
    def construct_meta_kwargs(self, admin_class):
        # Copy all attributes at AdminModel.Meta into admin class
        for attr_name in dir(self.admin_model.Meta):
            # Skip magic methods & predefined attributes
            if attr_name.startswith('__'):
                continue
            elif attr_name in self.admin_model.predefined_meta_fields:
                continue
            # Copy
            setattr(admin_class, attr_name, getattr(self.admin_model.Meta, attr_name))
    def build(self):
        """Return the fully-assembled ModelAdmin subclass."""
        # Create new ModelAdmin class
        class BuilderAdmin(admin.ModelAdmin):
            pass
        # Setup
        self.construct_list_display(BuilderAdmin)
        self.construct_get_queuryset(BuilderAdmin)
        self.construct_meta_kwargs(BuilderAdmin)
        return BuilderAdmin
| mit |
rrrene/django | django/core/serializers/pyyaml.py | 439 | 2843 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
    """SafeDumper that also knows Decimal and OrderedDict values."""

    def represent_decimal(self, data):
        # Emit Decimals as plain YAML strings so precision survives.
        text = str(data)
        return self.represent_scalar('tag:yaml.org,2002:str', text)

    def represent_ordered_dict(self, data):
        # Emit as an ordinary YAML mapping, preserving insertion order.
        pairs = data.items()
        return self.represent_mapping('tag:yaml.org,2002:map', pairs)
# Register the custom representers on the dumper class so every
# yaml.dump(..., Dumper=DjangoSafeDumper) call picks them up.
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
    """
    Convert a queryset to YAML.
    """
    internal_use_only = False
    def handle_field(self, obj, field):
        # A nasty special case: base YAML doesn't support serialization of time
        # types (as opposed to dates or datetimes, which it does support). Since
        # we want to use the "safe" serializer for better interoperability, we
        # need to do something with those pesky times. Converting 'em to strings
        # isn't perfect, but it's better than a "!!python/time" type which would
        # halt deserialization under any other language.
        if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
            self._current[field.name] = str(getattr(obj, field.name))
        else:
            super(Serializer, self).handle_field(obj, field)
    def end_serialization(self):
        # Dump all accumulated python-native objects in one pass with the
        # safe dumper (so Decimal/OrderedDict are handled).
        yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
    def getvalue(self):
        # Grand-parent super: deliberately skip PythonSerializer.getvalue()
        # and use the base serializer's stream-backed implementation.
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data.

    Accepts bytes, text or a file-like object; yields deserialized objects
    and wraps any parsing failure in DeserializationError.
    """
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    if isinstance(stream_or_string, six.string_types):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    try:
        # SafeLoader refuses to construct arbitrary python objects.
        for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
            yield obj
    except GeneratorExit:
        # Let generator close() propagate untouched.
        raise
    except Exception as e:
        # Map to deserializer error, preserving the original traceback.
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
| bsd-3-clause |
aronparsons/spacewalk | backend/server/test/TestRhnpush.py | 4 | 1463 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import TestServer
import server.app.packages
import SimpleXMLRPCServer
class TestRhnpush(TestServer.TestServer):
    """Test server wrapper exposing the rhnpush Packages XML-RPC app."""
    def __init__(self):
        # Explicit base-class __init__ call (Python 2 style; base class may
        # be old-style, so super() is avoided — TODO confirm).
        TestServer.TestServer.__init__(self)
        self._init_app()
    def _init_app(self):
        # Build the Packages application instance that will be served.
        self.app = server.app.packages.Packages()
    def getApp(self):
        return self.app
if __name__ == "__main__":
    # Python 2 script body: first exercise a few handlers directly with
    # hard-coded test credentials, then expose every registered handler
    # over XML-RPC on port 16000.
    server = TestRhnpush()
    app = server.getApp()
    print app.test_login(server.getUsername(), server.getPassword())
    print app.listChannel(['wregglej-test'], "wregglej", "bm8gv5z2")
    print app.listChannelSource(['wregglej-test'], "wregglej", "bm8gv5z2")
    # Note: 'server' is rebound here from TestRhnpush to the XML-RPC server.
    server = SimpleXMLRPCServer.SimpleXMLRPCServer(addr=('', 16000))
    for func in app.functions:
        print func
        server.register_function(getattr(app, func), name="app.%s" % (func))
    server.serve_forever()
| gpl-2.0 |
jcpowermac/ansible | test/runner/lib/changes.py | 66 | 5904 | """Detect changes in Ansible code."""
from __future__ import absolute_import, print_function
import re
import os
from lib.util import (
ApplicationError,
SubprocessError,
MissingEnvironmentVariable,
CommonConfig,
display,
)
from lib.http import (
HttpClient,
urlencode,
)
from lib.git import (
Git,
)
class InvalidBranch(ApplicationError):
    """Exception for invalid branch specification."""
    def __init__(self, branch, reason):
        """
        :type branch: str
        :type reason: str
        """
        self.branch = branch
        super(InvalidBranch, self).__init__(
            'Invalid branch: %s\n%s' % (branch, reason))
class ChangeDetectionNotSupported(ApplicationError):
    """Exception for cases where change detection is not supported."""
class ShippableChanges(object):
    """Change information for Shippable build."""
    def __init__(self, args, git):
        """
        :type args: CommonConfig
        :type git: Git
        """
        self.args = args
        # Shippable exposes build metadata through environment variables.
        try:
            self.branch = os.environ['BRANCH']
            self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
            self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
            self.commit = os.environ['COMMIT']
            self.project_id = os.environ['PROJECT_ID']
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])
        if self.is_tag:
            raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
        if self.is_pr:
            # Pull request build: diff against the PR's target branch.
            self.paths = sorted(git.get_diff_names(['origin/%s' % self.branch, '--']))
            self.diff = git.get_diff(['origin/%s' % self.branch, '--'])
        else:
            # Merge build: diff against the last commit that built successfully.
            merge_runs = self.get_merge_runs(self.project_id, self.branch)
            last_successful_commit = self.get_last_successful_commit(git, merge_runs)
            if last_successful_commit:
                self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit]))
                self.diff = git.get_diff([last_successful_commit, self.commit])
            else:
                # first run for branch
                self.paths = None  # act as though change detection not enabled, do not filter targets
                self.diff = []
    def get_merge_runs(self, project_id, branch):
        """
        :type project_id: str
        :type branch: str
        :rtype: list[dict]
        """
        # Query the Shippable REST API for non-PR runs of this branch.
        params = dict(
            isPullRequest='false',
            projectIds=project_id,
            branch=branch,
        )
        client = HttpClient(self.args, always=True)
        response = client.get('https://api.shippable.com/runs?%s' % urlencode(params))
        return response.json()
    @staticmethod
    def get_last_successful_commit(git, merge_runs):
        """
        :type git: Git
        :type merge_runs: dict | list[dict]
        :rtype: str
        """
        # An error payload with id 4004 means the project is unknown.
        if 'id' in merge_runs and merge_runs['id'] == 4004:
            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
            return None
        # Walk runs oldest-to-newest, keeping the newest commit whose run
        # succeeded (statusCode 30 appears to be Shippable's success code —
        # TODO confirm against the Shippable API docs) and which still
        # exists as a valid ref in the local repository.
        merge_runs = sorted(merge_runs, key=lambda r: r['createdAt'])
        known_commits = set()
        last_successful_commit = None
        for merge_run in merge_runs:
            commit_sha = merge_run['commitSha']
            if commit_sha not in known_commits:
                known_commits.add(commit_sha)
                if merge_run['statusCode'] == 30:
                    if git.is_valid_ref(commit_sha):
                        last_successful_commit = commit_sha
        if last_successful_commit is None:
            display.warning('No successful commit found. All tests will be executed.')
        return last_successful_commit
class LocalChanges(object):
    """Change information for local work."""
    def __init__(self, args, git):
        """
        :type args: CommonConfig
        :type git: Git
        """
        self.args = args
        self.current_branch = git.get_branch()
        # Change detection only makes sense from a feature branch.
        if self.is_official_branch(self.current_branch):
            raise InvalidBranch(branch=self.current_branch,
                                reason='Current branch is not a feature branch.')
        self.fork_branch = None
        self.fork_point = None
        self.local_branches = sorted(git.get_branches())
        self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
        # NOTE: the loop target is the attribute itself, so after a
        # successful break self.fork_branch holds the matching branch.
        for self.fork_branch in self.official_branches:
            try:
                self.fork_point = git.get_branch_fork_point(self.fork_branch)
                break
            except SubprocessError:
                pass
        if self.fork_point is None:
            raise ApplicationError('Unable to auto-detect fork branch and fork point.')
        # tracked files (including unchanged)
        self.tracked = sorted(git.get_file_names(['--cached']))
        # untracked files (except ignored)
        self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard']))
        # tracked changes (including deletions) committed since the branch was forked
        self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD']))
        # tracked changes (including deletions) which are staged
        self.staged = sorted(git.get_diff_names(['--cached']))
        # tracked changes (including deletions) which are not staged
        self.unstaged = sorted(git.get_diff_names([]))
        # diff of all tracked files from fork point to working copy
        self.diff = git.get_diff([self.fork_point])
    @staticmethod
    def is_official_branch(name):
        """
        :type name: str
        :rtype: bool
        """
        # Official branches are 'devel' and release branches like 'stable-2.4'.
        if name == 'devel':
            return True
        if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
            return True
        return False
| gpl-3.0 |
hakonsbm/nest-simulator | doc/topology/examples/connex_ew.py | 17 | 2229 | # -*- coding: utf-8 -*-
#
# connex_ew.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers of iaf_psc_alpha neurons with edge_wrap,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import pylab
import nest
import nest.topology as topo
pylab.ion()
nest.ResetKernel()
# create two test layers: 'a' without and 'b' with periodic boundary
# conditions (edge_wrap)
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha'})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
# divergent connections with circular mask, flat 0.5 probability and
# uniformly drawn weights
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 0.5}},
            'kernel': 0.5,
            'weights': {'uniform': {'min': 0.5, 'max': 2.0}},
            'delays': 1.0}
topo.ConnectLayers(a, b, conndict)
pylab.clf()
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
pylab.clf()
fig = pylab.gcf()
# plot targets of two source neurons into same figure, with mask;
# [0, 0] sits at the layer corner, demonstrating the edge wrap in 'b'
for src_pos in [[15, 15], [0, 0]]:
    # obtain node id for center
    src = topo.GetElement(a, src_pos)
    topo.PlotTargets(src, b, mask=conndict['mask'], fig=fig)
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-2.0, 2.0, -2.0, 2.0])
pylab.axes().set_aspect('equal', 'box')
pylab.title('Connection targets')
# pylab.savefig('connex_ew.pdf')
| gpl-2.0 |
Amitgb14/sos | sos/plugins/snmp.py | 12 | 1351 | # Copyright (C) 2007 Sadique Puthen <sputhenp@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Snmp(Plugin):
    """Simple network management protocol
    """
    plugin_name = "snmp"
    profiles = ('system', 'sysmgmt')
    # Trigger file: the plugin activates only when snmpd is configured.
    files = ('/etc/snmp/snmpd.conf',)
    def setup(self):
        # Collect the entire SNMP configuration directory.
        self.add_copy_spec("/etc/snmp")
class RedHatSnmp(Snmp, RedHatPlugin):
    # On the Red Hat family the daemon ships in the net-snmp package.
    packages = ('net-snmp',)
    def setup(self):
        super(RedHatSnmp, self).setup()
class DebianSnmp(Snmp, DebianPlugin, UbuntuPlugin):
    # On Debian/Ubuntu the daemon ships in the snmp package.
    packages = ('snmp',)
    def setup(self):
        super(DebianSnmp, self).setup()
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
catapult-project/catapult | third_party/pyasn1/pyasn1/type/char.py | 11 | 10771 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
from pyasn1 import error
from pyasn1.type import tag
from pyasn1.type import univ
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
NoValue = univ.NoValue
noValue = univ.noValue
class AbstractCharacterString(univ.OctetString):
    """Creates |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`.
    When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding.
    Keyword Args
    ------------
    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
        unicode object (Python 2) or string (Python 3), alternatively string
        (Python 2) or bytes (Python 3) representing octet-stream of serialised
        unicode string (note `encoding` parameter) or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in octet-stream context.
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    """
    # Text/bytes handling differs between Python 2 and 3, so the whole
    # conversion surface is defined twice and selected at class-creation time.
    if sys.version_info[0] <= 2:
        def __str__(self):
            try:
                # `str` is Py2 text representation
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                raise error.PyAsn1Error(
                    "Can't encode string '%s' with codec %s" % (self._value, self.encoding)
                )
        def __unicode__(self):
            return unicode(self._value)
        def prettyIn(self, value):
            # Normalise any accepted initializer to the internal unicode value.
            try:
                if isinstance(value, unicode):
                    return value
                elif isinstance(value, str):
                    return value.decode(self.encoding)
                elif isinstance(value, (tuple, list)):
                    return self.prettyIn(''.join([chr(x) for x in value]))
                elif isinstance(value, univ.OctetString):
                    return value.asOctets().decode(self.encoding)
                else:
                    return unicode(value)
            except (UnicodeDecodeError, LookupError):
                raise error.PyAsn1Error(
                    "Can't decode string '%s' with codec %s" % (value, self.encoding)
                )
        def asOctets(self, padding=True):
            return str(self)
        def asNumbers(self, padding=True):
            return tuple([ord(x) for x in str(self)])
    else:
        def __str__(self):
            # `unicode` is Py3 text representation
            return str(self._value)
        def __bytes__(self):
            try:
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                raise error.PyAsn1Error(
                    "Can't encode string '%s' with codec %s" % (self._value, self.encoding)
                )
        def prettyIn(self, value):
            # Normalise any accepted initializer to the internal str value.
            try:
                if isinstance(value, str):
                    return value
                elif isinstance(value, bytes):
                    return value.decode(self.encoding)
                elif isinstance(value, (tuple, list)):
                    return self.prettyIn(bytes(value))
                elif isinstance(value, univ.OctetString):
                    return value.asOctets().decode(self.encoding)
                else:
                    return str(value)
            except (UnicodeDecodeError, LookupError):
                raise error.PyAsn1Error(
                    "Can't decode string '%s' with codec %s" % (value, self.encoding)
                )
        def asOctets(self, padding=True):
            return bytes(self)
        def asNumbers(self, padding=True):
            return tuple(bytes(self))
    #
    # See OctetString.prettyPrint() for the explanation
    #
    def prettyOut(self, value):
        return value
    def prettyPrint(self, scope=0):
        # first see if subclass has its own .prettyOut()
        value = self.prettyOut(self._value)
        if value is not self._value:
            return value
        return AbstractCharacterString.__str__(self)
    def __reversed__(self):
        return reversed(self._value)
class NumericString(AbstractCharacterString):
    # ASN.1 NumericString, implicitly tagged [UNIVERSAL 18], us-ascii payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
    )
    encoding = 'us-ascii'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class PrintableString(AbstractCharacterString):
    # ASN.1 PrintableString, implicitly tagged [UNIVERSAL 19], us-ascii payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
    )
    encoding = 'us-ascii'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class TeletexString(AbstractCharacterString):
    # ASN.1 TeletexString, implicitly tagged [UNIVERSAL 20], latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
    )
    encoding = 'iso-8859-1'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class T61String(TeletexString):
    # T61String is an alias for TeletexString (same tag and encoding).
    __doc__ = TeletexString.__doc__
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class VideotexString(AbstractCharacterString):
    # ASN.1 VideotexString, implicitly tagged [UNIVERSAL 21], latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
    )
    encoding = 'iso-8859-1'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class IA5String(AbstractCharacterString):
    # ASN.1 IA5String, implicitly tagged [UNIVERSAL 22], us-ascii payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
    )
    encoding = 'us-ascii'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class GraphicString(AbstractCharacterString):
    # ASN.1 GraphicString, implicitly tagged [UNIVERSAL 25], latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
    )
    encoding = 'iso-8859-1'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class VisibleString(AbstractCharacterString):
    # ASN.1 VisibleString, implicitly tagged [UNIVERSAL 26], us-ascii payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
    )
    encoding = 'us-ascii'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class ISO646String(VisibleString):
    # ISO646String is an alias for VisibleString (same tag and encoding).
    __doc__ = VisibleString.__doc__
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class GeneralString(AbstractCharacterString):
    # ASN.1 GeneralString, implicitly tagged [UNIVERSAL 27], latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
    )
    encoding = 'iso-8859-1'
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class UniversalString(AbstractCharacterString):
    # ASN.1 UniversalString, implicitly tagged [UNIVERSAL 28], UCS-4 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
    )
    encoding = "utf-32-be"
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class BMPString(AbstractCharacterString):
    # ASN.1 BMPString, implicitly tagged [UNIVERSAL 30], UCS-2 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
    )
    encoding = "utf-16-be"
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
class UTF8String(AbstractCharacterString):
    # ASN.1 UTF8String, implicitly tagged [UNIVERSAL 12], utf-8 payload.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    encoding = "utf-8"
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
| bsd-3-clause |
anshumanchatterji/selenium | py/selenium/webdriver/firefox/remote_connection.py | 10 | 1145 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.remote.remote_connection import RemoteConnection
class FirefoxRemoteConnection(RemoteConnection):
    """Remote connection that adds Firefox's vendor-specific context command."""

    def __init__(self, remote_server_addr, keep_alive=True):
        RemoteConnection.__init__(self, remote_server_addr, keep_alive)
        # Firefox-only endpoint for switching between content and chrome
        # contexts via the "moz" vendor prefix.
        context_command = ('POST', '/session/$sessionId/moz/context')
        self._commands["SET_CONTEXT"] = context_command
| apache-2.0 |
matmutant/sl4a | python-build/python-libs/gdata/tests/atom_tests/http_interface_test.py | 87 | 1493 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import atom.http_interface
import StringIO
class HttpResponseTest(unittest.TestCase):
    def testConstructorWithStrings(self):
        # HttpResponse built from a plain string body must behave like a
        # file object (incremental read via the amt parameter, empty string
        # at EOF) and expose status, reason and header lookup with an
        # optional default.
        resp = atom.http_interface.HttpResponse(body='Hi there!', status=200,
            reason='OK', headers={'Content-Length':'9'})
        self.assertEqual(resp.read(amt=1), 'H')
        self.assertEqual(resp.read(amt=2), 'i ')
        self.assertEqual(resp.read(), 'there!')
        # Reads past EOF return the empty string rather than raising.
        self.assertEqual(resp.read(), '')
        self.assertEqual(resp.reason, 'OK')
        self.assertEqual(resp.status, 200)
        self.assertEqual(resp.getheader('Content-Length'), '9')
        self.assertTrue(resp.getheader('Missing') is None)
        self.assertEqual(resp.getheader('Missing', default='yes'), 'yes')
def suite():
    # Bundle all test* methods of HttpResponseTest into a single suite.
    return unittest.TestSuite((unittest.makeSuite(HttpResponseTest,'test'),))
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.