repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
sendwithus/sendwithus_python | setup.py | 1 | 1233 | from distutils.core import setup
from setuptools import find_packages
from io import open
# Use the project's Markdown README verbatim as the PyPI long description.
with open('README.md', encoding="utf-8") as fp:
    long_description = fp.read()

setup(
    name='sendwithus',
    version='5.2.2',
    author='sendwithus',
    author_email='us@sendwithus.com',
    packages=find_packages(),
    scripts=[],
    url='https://github.com/sendwithus/sendwithus_python',
    license='LICENSE.txt',
    description='Python API client for sendwithus.com',
    long_description=long_description,
    long_description_content_type='text/markdown',
    test_suite="sendwithus.test",
    # Runtime dependencies; six suggests the client supports Python 2 and 3,
    # matching the classifiers below.
    install_requires=[
        "requests >= 2.0.0",
        "six >= 1.9.0"
    ],
    # 'pip install sendwithus[test]' pulls in the test-only extras.
    extras_require={
        "test": [
            "pytest >= 3.0.5",
            "pytest-xdist >= 1.15.0"
        ]
    },
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "License :: OSI Approved :: Apache Software License",
        "Development Status :: 5 - Production/Stable",
        "Topic :: Communications :: Email"
    ]
)
| apache-2.0 |
OptimusGitEtna/RestSymf | Python-3.4.2/Lib/idlelib/CallTips.py | 97 | 5932 | """CallTips.py - An IDLE Extension to Jog Your Memory
Call Tips are floating windows which display function, class, and method
parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
import __main__
import inspect
import re
import sys
import textwrap
import types
from idlelib import CallTipWindow
from idlelib.HyperParser import HyperParser
class CallTips:
    """IDLE extension that shows floating call-tip windows.

    A tip opens when the caret sits inside the parentheses of a call and
    is removed when the call closes.  In an IDLE with a subprocess, the
    tip text is fetched remotely so it reflects the user's namespace.
    """

    # Menu entry exposed by IDLE's extension mechanism.
    menudefs = [
        ('edit', [
            ("Show call tip", "<<force-open-calltip>>"),
        ])
    ]

    def __init__(self, editwin=None):
        if editwin is None:  # subprocess and test
            self.editwin = None
        else:
            self.editwin = editwin
            self.text = editwin.text
        self.active_calltip = None
        # Stored as a factory (not an instance) so close() can disable
        # further window creation by nulling it out.
        self._calltip_window = self._make_tk_calltip_window

    def close(self):
        # Disable tip creation; called when the extension is unloaded.
        self._calltip_window = None

    def _make_tk_calltip_window(self):
        # See __init__ for usage
        return CallTipWindow.CallTip(self.text)

    def _remove_calltip_window(self, event=None):
        # Hide and forget the current tip, if any.
        if self.active_calltip:
            self.active_calltip.hidetip()
            self.active_calltip = None

    def force_open_calltip_event(self, event):
        "The user selected the menu entry or hotkey, open the tip."
        self.open_calltip(True)

    def try_open_calltip_event(self, event):
        """Happens when it would be nice to open a CallTip, but not really
        necessary, for example after an opening bracket, so function calls
        won't be made.
        """
        self.open_calltip(False)

    def refresh_calltip_event(self, event):
        # Refresh only when a tip is already showing (e.g. after edits).
        if self.active_calltip and self.active_calltip.is_active():
            self.open_calltip(False)

    def open_calltip(self, evalfuncs):
        """Locate the enclosing call and show a tip for it.

        evalfuncs: when True (explicit user request), the expression may
        be evaluated even if doing so could call functions.
        """
        self._remove_calltip_window()
        hp = HyperParser(self.editwin, "insert")
        sur_paren = hp.get_surrounding_brackets('(')
        if not sur_paren:
            return  # caret is not inside a call's parentheses
        hp.set_index(sur_paren[0])
        expression = hp.get_expression()
        if not expression:
            return
        # Refuse to evaluate expressions that contain calls unless the
        # user explicitly asked for the tip.
        if not evalfuncs and (expression.find('(') != -1):
            return
        argspec = self.fetch_tip(expression)
        if not argspec:
            return
        self.active_calltip = self._calltip_window()
        self.active_calltip.showtip(argspec, sur_paren[0], sur_paren[1])

    def fetch_tip(self, expression):
        """Return the argument list and docstring of a function or class.

        If there is a Python subprocess, get the calltip there.  Otherwise,
        either this fetch_tip() is running in the subprocess or it was
        called in an IDLE running without the subprocess.

        The subprocess environment is that of the most recently run script.  If
        two unrelated modules are being edited some calltips in the current
        module may be inoperative if the module was not the last to run.

        To find methods, fetch_tip must be fed a fully qualified name.
        """
        try:
            rpcclt = self.editwin.flist.pyshell.interp.rpcclt
        except AttributeError:
            rpcclt = None  # no subprocess: fall back to local evaluation
        if rpcclt:
            return rpcclt.remotecall("exec", "get_the_calltip",
                                     (expression,), {})
        else:
            return get_argspec(get_entity(expression))
def get_entity(expression):
    """Evaluate *expression* and return the resulting object.

    The evaluation namespace spans sys.modules plus __main__.__dict__,
    so both fully qualified module attributes and names defined in the
    shell resolve.  Returns None for an empty expression or when the
    evaluation raises.
    """
    if not expression:
        return None
    env = dict(sys.modules)
    env.update(__main__.__dict__)
    try:
        return eval(expression, env)
    except BaseException:
        # eval can raise absolutely anything (user classes may be
        # involved); an uncaught exception would close IDLE, so swallow.
        return None
# The following are used in get_argspec and some in tests
_MAX_COLS = 85
_MAX_LINES = 5 # enough for bytes
_INDENT = ' '*4 # for wrapped signatures
_first_param = re.compile('(?<=\()\w*\,?\s*')
_default_callable_argspec = "See source or doc"
def get_argspec(ob):
    '''Return a string describing the signature of a callable object, or ''.

    For Python-coded functions and methods, the first line is introspected.
    Delete 'self' parameter for classes (.__init__) and bound methods.
    The next lines are the first lines of the doc string up to the first
    empty line or _MAX_LINES.  For builtins, this typically includes
    the arguments in addition to the return value.
    '''
    argspec = ""
    try:
        ob_call = ob.__call__
    except BaseException:
        return argspec  # not callable at all: no tip
    # Select the function object whose signature will be introspected.
    if isinstance(ob, type):
        fob = ob.__init__          # a class: show the constructor
    elif isinstance(ob_call, types.MethodType):
        fob = ob_call              # a callable instance: show __call__
    else:
        fob = ob
    if isinstance(fob, (types.FunctionType, types.MethodType)):
        argspec = inspect.formatargspec(*inspect.getfullargspec(fob))
        # Drop the implicit first parameter ('self') for classes and
        # bound methods, since the caller never passes it explicitly.
        if (isinstance(ob, (type, types.MethodType)) or
                isinstance(ob_call, types.MethodType)):
            argspec = _first_param.sub("", argspec)
    # Wrap an over-long signature line; otherwise keep it as one line
    # (or no lines at all if introspection produced nothing).
    lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT)
             if len(argspec) > _MAX_COLS else [argspec] if argspec else [])
    # Prefer __call__'s docstring for callable instances.
    if isinstance(ob_call, types.MethodType):
        doc = ob_call.__doc__
    else:
        doc = getattr(ob, "__doc__", "")
    if doc:
        for line in doc.split('\n', _MAX_LINES)[:_MAX_LINES]:
            line = line.strip()
            if not line:
                break  # stop at the first blank docstring line
            if len(line) > _MAX_COLS:
                line = line[: _MAX_COLS - 3] + '...'
            lines.append(line)
    argspec = '\n'.join(lines)
    if not argspec:
        argspec = _default_callable_argspec
    return argspec
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_calltips', verbosity=2)
| mit |
probcomp/cgpm | tests/disabled_test_render_utils.py | 1 | 5792 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graphical test suite providing coverage for cgpm.utils.render."""
import os
from string import ascii_uppercase
import numpy as np
from cgpm.crosscat.state import State
from cgpm.mixtures.view import View
from cgpm.utils import config as cu
from cgpm.utils import general as gu
from cgpm.utils import render as ru
OUT = '/tmp/'                # directory receiving the rendered figures
RNG = gu.gen_rng(7)          # fixed seed for reproducible synthetic data
TIMESTAMP = cu.timestamp()   # shared filename prefix for this test run
# Define datasets.
test_dataset_dpmm = np.array([
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
])
test_dataset_with_distractors = np.array([
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
# Mixed dataset: the 6 binary columns plus 6 Gaussian(10, 5) columns, 12 rows.
test_dataset_mixed = np.hstack((
    np.array(test_dataset_dpmm),
    RNG.normal(10, 5, size=[12, 6]),
))
# Same data with one extra all-NaN row appended (13 rows total).
test_dataset_mixed_nan = np.vstack((test_dataset_mixed, [np.nan]*12))
# Wide / tall variants: the mixed dataset tiled 3x horizontally / vertically.
test_dataset_wide = np.hstack(
    (test_dataset_mixed, test_dataset_mixed, test_dataset_mixed))
test_dataset_tall = np.vstack(
    (test_dataset_mixed, test_dataset_mixed, test_dataset_mixed))
# Initialize DPMM and CrossCat models for the above data.
def init_view_state(data, iters, cctypes):
    """Build a cgpm View and State on *data* and run *iters* transitions.

    Returns the (view, state) pair.  The View's outputs list gets an
    extra column id 1000 prepended -- presumably the latent cluster
    assignment column required by View; confirm against cgpm docs.
    """
    if isinstance(data, list):
        data = np.array(data)
    D = len(data[0])
    # NOTE(review): 'outputs' must behave like a list for '[1000] + outputs'
    # below, so this module targets Python 2 (where range() returns a list).
    outputs = range(D)
    X = {c: data[:, i].tolist() for i, c in enumerate(outputs)}
    view = View(X, cctypes=cctypes, outputs=[1000] + outputs, rng=RNG)
    state = State(data[:, 0:D], outputs=outputs, cctypes=cctypes, rng=RNG)
    if iters > 0:
        view.transition(iters)
        state.transition(iters)
    return view, state
# Helpers
def string_generator(N=1, length=10):
    """Return a list of *N* random strings of uppercase ASCII letters.

    Each string is *length* characters long; used to fabricate row and
    column labels for the rendering tests.

    Fix: use range() instead of xrange() -- xrange does not exist on
    Python 3 (NameError), while range() is correct on both 2 and 3.
    """
    from random import choice
    return [
        (''.join(choice(ascii_uppercase) for _ in range(length)))
        for _ in range(N)
    ]
def get_filename(name):
    """Prefix *name* with this run's timestamp and anchor it under OUT."""
    stamped = '{0}_{1}'.format(TIMESTAMP, name)
    return os.path.join(OUT, stamped)
# Global variables for test cases involving a CrossCat state.
# 25 transitions on the mixed-with-NaN dataset: 6 bernoulli + 6 normal columns.
VIEW, STATE = init_view_state(
    test_dataset_mixed_nan, 25, ['bernoulli']*6 + ['normal']*6)
ROW_NAMES = string_generator(13, 10)   # 13 labels: 12 data rows + the NaN row
COL_NAMES = string_generator(12, 7)    # one label per column
# Test cases
def test_viz_data():
    # Smoke test: viz_data must tolerate an all-NaN row and save a figure.
    fig, _ax = ru.viz_data(test_dataset_mixed_nan,)
    fig.savefig(get_filename('test_viz_data.png'))
def test_viz_data_with_names():
    # Labels must match the dpmm dataset's shape: 12 rows x 6 columns.
    row_names = string_generator(12, 10)
    col_names = string_generator(6, 7)
    fig, _ax = ru.viz_data(
        test_dataset_dpmm, row_names=row_names, col_names=col_names)
    fig.savefig(get_filename('test_viz_data_with_names.png'))
def test_viz_wide_data():
    # A matrix 3x as wide should still render and save cleanly.
    fig, _ax = ru.viz_data(test_dataset_wide)
    fig.savefig(get_filename('test_viz_wide_data.png'))
def test_viz_tall_data():
    # A matrix 3x as tall should still render and save cleanly.
    fig, _ax = ru.viz_data(test_dataset_tall)
    fig.savefig(get_filename('test_viz_tall_data.png'))
def test_viz_view():
    # Render the shared, pre-transitioned View.
    fig, _ax = ru.viz_view(VIEW)
    fig.savefig(get_filename('test_viz_view.png'))
def test_viz_view_with_names():
    # Same as test_viz_view but with row/column labels supplied.
    fig, _ax = ru.viz_view(VIEW, row_names=ROW_NAMES, col_names=COL_NAMES)
    fig.savefig(get_filename('test_viz_view_with_names.png'))
def test_viz_view_with_names_subsample():
    # Subsampling rows (subsample=2, fixed seed) must keep labels aligned.
    fig, _ax = ru.viz_view(
        VIEW,
        row_names=ROW_NAMES,
        col_names=COL_NAMES,
        subsample=2,
        seed=2,
        yticklabelsize='medium',
    )
    fig.savefig(get_filename('test_viz_view_with_names_subsample.png'))
def test_viz_state():
    # Render the shared, pre-transitioned CrossCat State.
    fig, _ax = ru.viz_state(STATE)
    fig.savefig(get_filename('test_viz_state.png'))
def test_viz_state_with_names():
    # Same as test_viz_state but with row/column labels supplied.
    fig, _ax = ru.viz_state(STATE, row_names=ROW_NAMES, col_names=COL_NAMES)
    fig.savefig(get_filename('test_viz_state_with_names.png'))
def test_viz_state_with_names_subsample():
    # The subsampled rows should be the same for all views since identical seed
    # is passed to viz_view_raw, and each function creates its own rng.
    fig, _ax = ru.viz_state(
        STATE,
        row_names=ROW_NAMES,
        col_names=COL_NAMES,
        subsample=4,
        seed=2,
        yticklabelsize='medium',
    )
    fig.savefig(get_filename('test_viz_state_with_names_subsample.png'))
| apache-2.0 |
google-research/realworldrl_suite | examples/run_ppo.py | 1 | 3367 | # coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains an OpenAI Baselines PPO agent on realworldrl.
Note that OpenAI Gym is not installed with realworldrl by default.
See also github.com/openai/baselines for more information.
This example also relies on dm2gym for its gym environment wrapper.
See github.com/zuoxingdong/dm2gym for more information.
"""
import os
from absl import app
from absl import flags
from baselines import bench
from baselines.common.vec_env import dummy_vec_env
from baselines.ppo2 import ppo2
import dm2gym.envs.dm_suite_env as dm2gym
import realworldrl_suite.environments as rwrl
# Command-line flags: environment selection, output location, and PPO
# hyperparameters.
flags.DEFINE_string('domain_name', 'cartpole', 'domain to solve')
flags.DEFINE_string('task_name', 'realworld_balance', 'task to solve')
flags.DEFINE_string('save_path', '/tmp/rwrl', 'where to save results')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
flags.DEFINE_string('network', 'mlp', 'name of network architecture')
flags.DEFINE_float('agent_discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('nsteps', 100, 'number of steps per ppo rollout')
flags.DEFINE_integer('total_timesteps', 1000000, 'total steps for experiment')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')

FLAGS = flags.FLAGS
class GymEnv(dm2gym.DMSuiteEnv):
    """Wrapper that convert a realworldrl environment to a gym environment."""

    def __init__(self, env):
        """Constructor. We reuse the facilities from dm2gym."""
        # NOTE(review): deliberately does not call super().__init__ --
        # presumably dm2gym's constructor would build its own environment,
        # so only the attributes gym consumers rely on are set here; confirm
        # against dm2gym's DMSuiteEnv implementation.
        self.env = env
        self.metadata = {
            'render.modes': ['human', 'rgb_array'],
            'video.frames_per_second': round(1. / self.env.control_timestep())
        }
        # Translate dm_control specs into gym spaces.
        self.observation_space = dm2gym.convert_dm_control_to_gym_space(
            self.env.observation_spec())
        self.action_space = dm2gym.convert_dm_control_to_gym_space(
            self.env.action_spec())
        self.viewer = None
def run():
    """Runs a PPO agent on a given environment."""

    def _load_env():
        """Loads environment."""
        raw_env = rwrl.load(
            domain_name=FLAGS.domain_name,
            task_name=FLAGS.task_name,
            safety_spec=dict(enable=True),
            # Actions are delayed by 20 steps (delay_spec challenge).
            delay_spec=dict(enable=True, actions=20),
            log_output=os.path.join(FLAGS.save_path, 'log.npz'),
            environment_kwargs=dict(
                log_safety_vars=True, log_every=20, flat_observation=True))
        env = GymEnv(raw_env)
        # bench.Monitor records per-episode statistics for baselines.
        env = bench.Monitor(env, FLAGS.save_path)
        return env

    # ppo2 expects a vectorized env; wrap the single env factory.
    env = dummy_vec_env.DummyVecEnv([_load_env])
    ppo2.learn(
        env=env,
        network=FLAGS.network,
        lr=FLAGS.learning_rate,
        total_timesteps=FLAGS.total_timesteps,  # make sure to run enough steps
        nsteps=FLAGS.nsteps,
        gamma=FLAGS.agent_discount,
    )
def main(argv):
    # absl passes the remaining argv; this binary takes flags only.
    del argv  # Unused.
    run()

if __name__ == '__main__':
    app.run(main)
| apache-2.0 |
pkreissl/espresso | testsuite/python/lb_thermostat.py | 3 | 2652 | # Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd.lb
from thermostats_common import ThermostatsCommon
"""
Check the lattice-Boltzmann thermostat with respect to the particle velocity
distribution.
"""
# Fluid and coupling parameters (simulation units).
KT = 0.25          # target thermal energy checked by the test
AGRID = 2.5        # LB lattice spacing
VISC = 2.7         # fluid viscosity
DENS = 1.7         # fluid density
TIME_STEP = 0.05   # shared MD time step and LB tau

LB_PARAMS = {'agrid': AGRID,
             'dens': DENS,
             'visc': VISC,
             'tau': TIME_STEP,
             'kT': KT,
             'seed': 123}
class LBThermostatCommon(ThermostatsCommon):
    """Base class of the test that holds the test logic."""

    # Subclasses assign the LB fluid actor (CPU or GPU) in setUp().
    lbf = None
    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    system.time_step = TIME_STEP
    system.cell_system.skin = 0.4 * AGRID

    def prepare(self):
        # Fresh fluid each run, 100 randomly placed particles coupled to
        # it via the LB thermostat (friction gamma=5.0).
        self.system.actors.clear()
        self.system.actors.add(self.lbf)
        self.system.part.add(
            pos=np.random.random((100, 3)) * self.system.box_l)
        self.system.thermostat.set_lb(LB_fluid=self.lbf, seed=5, gamma=5.0)

    def test_velocity_distribution(self):
        # Sample particle velocities over 250 short runs and check the
        # distribution against temperature KT via the shared helper.
        self.prepare()
        self.system.integrator.run(20)  # let the system equilibrate first
        N = len(self.system.part)
        loops = 250
        v_stored = np.zeros((loops, N, 3))
        for i in range(loops):
            self.system.integrator.run(3)
            v_stored[i] = self.system.part[:].v
        minmax = 5
        n_bins = 7
        error_tol = 0.01
        self.check_velocity_distribution(
            v_stored.reshape((-1, 3)), minmax, n_bins, error_tol, KT)
class LBCPUThermostat(ut.TestCase, LBThermostatCommon):
    """Test for the CPU implementation of the LB."""

    def setUp(self):
        self.lbf = espressomd.lb.LBFluid(**LB_PARAMS)
@utx.skipIfMissingGPU()  # skipped automatically on hosts without a GPU
class LBGPUThermostat(ut.TestCase, LBThermostatCommon):
    """Test for the GPU implementation of the LB."""

    def setUp(self):
        self.lbf = espressomd.lb.LBFluidGPU(**LB_PARAMS)
if __name__ == '__main__':
ut.main()
| gpl-3.0 |
CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleDisableInteractiveStartup.py | 1 | 4581 | #!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule ConfigureAppleSoftwareUpdate
@author: ekkehard j. koch
@change: 03/18/2013 Original Implementation
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.DisableInteractiveStartup import DisableInteractiveStartup
class zzzTestRuleDisableInteractiveStartup(RuleTest):
    """Harness wiring for the DisableInteractiveStartup rule.

    The check* hooks below always return True: this test only verifies
    that report/fix/undo run without raising, not the resulting state.
    """

    def setUp(self):
        RuleTest.setUp(self)
        self.rule = DisableInteractiveStartup(self.config,
                                              self.environ,
                                              self.logdispatch,
                                              self.statechglogger)
        self.rulename = self.rule.rulename
        self.rulenumber = self.rule.rulenumber
        self.ch = CommandHelper(self.logdispatch)

    def tearDown(self):
        pass

    def runTest(self):
        # Delegates to the shared report/fix/undo cycle in RuleTest.
        self.simpleRuleTest()

    def setConditionsForRule(self):
        '''Configure system for the unit test

        :param self: essential if you override this definition
        :returns: boolean - If successful True; If failure False
        @author: ekkehard j. koch
        '''
        # No preconditions needed for this rule.
        success = True
        return success

    def checkReportForRule(self, pCompliance, pRuleSuccess):
        '''check on whether report was correct

        :param self: essential if you override this definition
        :param pCompliance: the self.iscompliant value of rule
        :param pRuleSuccess: did report run successfully
        :returns: boolean - If successful True; If failure False
        @author: ekkehard j. koch
        '''
        self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
                             str(pCompliance) + ".")
        self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
                             str(pRuleSuccess) + ".")
        success = True
        return success

    def checkFixForRule(self, pRuleSuccess):
        '''check on whether fix was correct

        :param self: essential if you override this definition
        :param pRuleSuccess: did report run successfully
        :returns: boolean - If successful True; If failure False
        @author: ekkehard j. koch
        '''
        self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
                             str(pRuleSuccess) + ".")
        success = True
        return success

    def checkUndoForRule(self, pRuleSuccess):
        '''check on whether undo was correct

        :param self: essential if you override this definition
        :param pRuleSuccess: did report run successfully
        :returns: boolean - If successful True; If failure False
        @author: ekkehard j. koch
        '''
        self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
                             str(pRuleSuccess) + ".")
        success = True
        return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 |
nexiles/pyshop | pyshop/views/repository.py | 6 | 2042 | # -*- coding: utf-8 -*-
"""
PyShop Release File Download View.
"""
from pyramid.settings import asbool
from pyshop.models import DBSession, Release, ReleaseFile
def show_release_file(root, request):
    """
    Download a release file.

    Must be used with :func:`pyshop.helpers.download.renderer_factory`
    to download the release file.

    :return: download informations
    :rtype: dict
    """
    settings = request.registry.settings
    whlify = asbool(settings.get('pyshop.mirror.wheelify', '0'))
    session = DBSession()
    f = ReleaseFile.by_id(session, int(request.matchdict['file_id']))
    # Only source distributions can be converted to wheels.
    whlify = whlify and f.package_type == 'sdist'
    filename = f.filename_whlified if whlify else f.filename
    url = f.url
    # Upgrade plain-HTTP links to the official PyPI host to HTTPS.
    if url and url.startswith('http://pypi.python.org'):
        url = 'https' + url[4:]
    rv = {'url': url,
          'filename': filename,
          'original': f.filename,
          'whlify': whlify
          }
    # Bump the download counters at file, release and package level.
    f.downloads += 1
    f.release.downloads += 1
    f.release.package.downloads += 1
    session.add(f.release.package)
    session.add(f.release)
    session.add(f)
    return rv
def show_external_release_file(root, request):
    """
    Download a release from a download url from its package information.

    Must be used with :func:`pyshop.helpers.download.renderer_factory`
    to download the release file.

    :return: download informations
    :rtype: dict
    """
    session = DBSession()
    settings = request.registry.settings
    whlify = asbool(settings.get('pyshop.mirror.wheelify', '0'))
    release = Release.by_id(session, int(request.matchdict['release_id']))
    # Wheelified name when enabled, otherwise the original download name.
    filename = (release.whlify_download_url_file if whlify else
                release.download_url_file)
    rv = {'url': release.download_url,
          'filename': filename,
          'original': release.download_url_file,
          'whlify': whlify
          }
    # Bump the download counters at release and package level.
    release.downloads += 1
    release.package.downloads += 1
    session.add(release.package)
    session.add(release)
    return rv
| bsd-3-clause |
leighpauls/k2cro4 | third_party/python_26/Lib/test/test_cProfile.py | 51 | 7028 | """Test suite for the cProfile module."""
import sys
from test.test_support import run_unittest, TESTFN, unlink
# rip off all interesting stuff from test_profile
import cProfile
from test.test_profile import ProfileTest, regenerate_expected_output
class CProfileTest(ProfileTest):
    # Reuse the shared profile test-suite, driven by cProfile's profiler.
    profilerclass = cProfile.Profile

    # Issue 3895.
    def test_bad_counter_during_dealloc(self):
        import _lsprof
        # Must use a file as StringIO doesn't trigger the bug.
        sys.stderr = open(TESTFN, 'w')
        try:
            obj = _lsprof.Profiler(lambda: int)
            obj.enable()
            # Rebinding drops the enabled profiler; deallocating it with a
            # bogus timer argument is the crash path covered by issue 3895.
            obj = _lsprof.Profiler(1)
            obj.disable()
        finally:
            # Restore stderr and remove the scratch file regardless.
            sys.stderr = sys.__stderr__
            unlink(TESTFN)
def test_main():
    # Entry point used by Python's regrtest framework.
    run_unittest(CProfileTest)
def main():
    # With -r, regenerate the expected-output blocks appended below
    # instead of running the tests.
    if '-r' not in sys.argv:
        test_main()
    else:
        regenerate_expected_output(__file__, CProfileTest)
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
CProfileTest.expected_output['print_stats'] = """\
126 function calls (106 primitive calls) in 1.000 CPU seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 1.000 1.000 <string>:1(<module>)
28 0.028 0.001 0.028 0.001 profilee.py:110(__getattr__)
1 0.270 0.270 1.000 1.000 profilee.py:25(testfunc)
23/3 0.150 0.007 0.170 0.057 profilee.py:35(factorial)
20 0.020 0.001 0.020 0.001 profilee.py:48(mul)
2 0.040 0.020 0.600 0.300 profilee.py:55(helper)
4 0.116 0.029 0.120 0.030 profilee.py:73(helper1)
2 0.000 0.000 0.140 0.070 profilee.py:84(helper2_indirect)
8 0.312 0.039 0.400 0.050 profilee.py:88(helper2)
8 0.064 0.008 0.080 0.010 profilee.py:98(subhelper)
12 0.000 0.000 0.012 0.001 {hasattr}
4 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
8 0.000 0.000 0.000 0.000 {range}
4 0.000 0.000 0.000 0.000 {sys.exc_info}
"""
CProfileTest.expected_output['print_callers'] = """\
Ordered by: standard name
Function was called by...
ncalls tottime cumtime
<string>:1(<module>) <-
profilee.py:110(__getattr__) <- 16 0.016 0.016 profilee.py:98(subhelper)
12 0.012 0.012 {hasattr}
profilee.py:25(testfunc) <- 1 0.270 1.000 <string>:1(<module>)
profilee.py:35(factorial) <- 1 0.014 0.130 profilee.py:25(testfunc)
20/3 0.130 0.147 profilee.py:35(factorial)
2 0.006 0.040 profilee.py:84(helper2_indirect)
profilee.py:48(mul) <- 20 0.020 0.020 profilee.py:35(factorial)
profilee.py:55(helper) <- 2 0.040 0.600 profilee.py:25(testfunc)
profilee.py:73(helper1) <- 4 0.116 0.120 profilee.py:55(helper)
profilee.py:84(helper2_indirect) <- 2 0.000 0.140 profilee.py:55(helper)
profilee.py:88(helper2) <- 6 0.234 0.300 profilee.py:55(helper)
2 0.078 0.100 profilee.py:84(helper2_indirect)
profilee.py:98(subhelper) <- 8 0.064 0.080 profilee.py:88(helper2)
{hasattr} <- 4 0.000 0.004 profilee.py:73(helper1)
8 0.000 0.008 profilee.py:88(helper2)
{method 'append' of 'list' objects} <- 4 0.000 0.000 profilee.py:73(helper1)
{method 'disable' of '_lsprof.Profiler' objects} <-
{range} <- 8 0.000 0.000 profilee.py:98(subhelper)
{sys.exc_info} <- 4 0.000 0.000 profilee.py:73(helper1)
"""
CProfileTest.expected_output['print_callees'] = """\
Ordered by: standard name
Function called...
ncalls tottime cumtime
<string>:1(<module>) -> 1 0.270 1.000 profilee.py:25(testfunc)
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> 1 0.014 0.130 profilee.py:35(factorial)
2 0.040 0.600 profilee.py:55(helper)
profilee.py:35(factorial) -> 20/3 0.130 0.147 profilee.py:35(factorial)
20 0.020 0.020 profilee.py:48(mul)
profilee.py:48(mul) ->
profilee.py:55(helper) -> 4 0.116 0.120 profilee.py:73(helper1)
2 0.000 0.140 profilee.py:84(helper2_indirect)
6 0.234 0.300 profilee.py:88(helper2)
profilee.py:73(helper1) -> 4 0.000 0.004 {hasattr}
4 0.000 0.000 {method 'append' of 'list' objects}
4 0.000 0.000 {sys.exc_info}
profilee.py:84(helper2_indirect) -> 2 0.006 0.040 profilee.py:35(factorial)
2 0.078 0.100 profilee.py:88(helper2)
profilee.py:88(helper2) -> 8 0.064 0.080 profilee.py:98(subhelper)
8 0.000 0.008 {hasattr}
profilee.py:98(subhelper) -> 16 0.016 0.016 profilee.py:110(__getattr__)
8 0.000 0.000 {range}
{hasattr} -> 12 0.012 0.012 profilee.py:110(__getattr__)
{method 'append' of 'list' objects} ->
{method 'disable' of '_lsprof.Profiler' objects} ->
{range} ->
{sys.exc_info} ->
"""
if __name__ == "__main__":
main()
| bsd-3-clause |
codefisher/web_games | million/views.py | 1 | 5027 | import random
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from .models import Game, Question
# Create your views here.
def index(request):
    """Landing page: list every available game."""
    context = {"games": Game.objects.all()}
    return render(request, "million/index.html", context)
def game(request, game_id):
    """Show a game's intro page and reset all session progress/lifelines."""
    game = get_object_or_404(Game, pk=game_id)
    questions = Question.objects.filter(game=game).order_by("value")
    request.session['game'] = game.pk
    request.session['question'] = 0        # index of the next question to ask
    request.session['call'] = True         # phone-a-friend lifeline available
    request.session['audience'] = True     # ask-the-audience lifeline available
    request.session['half'] = True         # 50:50 lifeline available
    request.session['half_question'] = None
    request.session['half_wrong'] = None
    return render(request, "million/game.html", {
        "game": game,
        "questions": questions,
    })
def retire(request, game_id):
    """Show the 'walk away' page with the prize banked so far.

    The session stores the index of the *next* unanswered question, so a
    retiring player has earned the value of the previous one.  Fix:
    quitting before answering anything previously indexed questions[-1],
    paying out the game's top prize; it is now worth "$0" (the same
    zero-prize literal used by play()).
    """
    game = get_object_or_404(Game, pk=game_id)
    questions = Question.objects.filter(game=game).order_by("value")
    # .get with a default also survives a missing session key.
    answered = request.session.get('question', 0)
    if answered > 0:
        prize = questions[answered - 1].dollars()
    else:
        prize = "$0"
    return render(request, "million/quit.html", {
        "game": game,
        "questions": questions,
        "prize": prize,
    })
def play(request, game_id):
    """Drive the quiz: initialise the session, grade answers, show guesses."""
    game = get_object_or_404(Game, pk=game_id)
    questions = Question.objects.filter(game=game).order_by("value")
    # First visit (or switching to another game): reset all progress and
    # lifelines, then jump straight to the first question.
    if request.session.get('game') != game.pk:
        request.session['game'] = game.pk
        request.session['question'] = 0
        request.session['call'] = True
        request.session['audience'] = True
        request.session['half'] = True
        request.session['half_question'] = None
        request.session['half_wrong'] = None
        return redirect(reverse("million-question", kwargs={"question_id": questions[request.session.get('question')].pk}))
    if request.method == 'POST':
        question = get_object_or_404(Question, pk=request.POST.get('question'))
        if 'answer' in request.POST:
            # Grade the submitted answer ('one'..'four').
            answer = request.POST.get('answer')
            correct = ((answer == 'one' and question.answer_one_correct)
                       or (answer == 'two' and question.answer_two_correct)
                       or (answer == 'three' and question.answer_three_correct)
                       or (answer == 'four' and question.answer_four_correct))
            prize = "$0"
            if correct:
                request.session['question'] += 1
            else:
                # Wrong: fall back to the last safe-haven prize (every 5th
                # question acts as a milestone), then restart at question 0.
                index = request.session['question']
                if index // 5:
                    prize = questions[(index // 5 * 5) - 1].dollars()
                request.session['question'] = 0
            # All questions answered: show the grand-prize page.
            if request.session['question'] >= len(questions):
                return render(request, "million/million.html", {
                    "prize": questions[len(questions)-1].dollars()
                })
            return render(request, "million/result.html", {
                "game": game,
                "prize": prize,
                "questions": questions,
                "question": question,
                "next_question": questions[request.session['question']],
                "correct": correct,
                "current": request.session['question'],
            })
        elif 'guess' in request.POST:
            # Lifeline flow: reveal the text of the guessed option without
            # committing to it as an answer.
            guess = request.POST.get('guess')
            answer = getattr(question, "answer_%s" % guess)
            return render(request, "million/guess.html", {
                "game": game,
                "questions": questions,
                "question": question,
                "guess": guess,
                "answer": answer,
                "current": request.session['question'],
            })
    # GET (or unrecognized POST): continue at the current question, or back
    # to the game page if the index ran past the end.
    try:
        return redirect(reverse("million-question", kwargs={"question_id": questions[request.session.get('question')].pk}))
    except IndexError:
        return redirect(reverse("million-game", kwargs={"game_id": game.pk}))
def question(request, question_id, action=None):
    """Display a question, consuming a lifeline when *action* is given."""
    question = get_object_or_404(Question, pk=question_id)
    questions = Question.objects.filter(game=question.game).order_by("value")
    # Each lifeline can be used once per game; mark it as spent.
    if action == 'call':
        request.session['call'] = False
    if action == 'half':
        request.session['half'] = False
        request.session['half_question'] = question.pk
    if action == 'audience':
        request.session['audience'] = False
    # 50:50 lifeline: pick one wrong option at random and pin it in the
    # session so a page reload reuses the same choice.  NOTE(review): only
    # one wrong option is stored -- presumably the template uses it to
    # strike out answers; confirm against million/question.html.
    if request.session.get('half_question') == question.pk and request.session.get('half_wrong') is None:
        request.session['half_wrong'] = random.choice([num for num in ['one', 'two', 'three', 'four'] if not getattr(question, 'answer_%s_correct' % num)])
    return render(request, "million/question.html", {
        "game": question.game,
        "question": question,
        "questions": questions,
        "current": request.session['question'],
        "half": request.session['half'],
        "audience": request.session['audience'],
        "call": request.session['call'],
        # Expose the eliminated option only while viewing the 50:50 question.
        "half_wrong": request.session.get('half_wrong') if request.session.get('half_question') == question.pk else None,
    })
espadrine/opera | chromium/src/tools/playback_benchmark/run.py | 51 | 1164 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests playback.
Prerequisites:
1. OpenSSL library - http://www.openssl.org/
2. Python interface to the OpenSSL library - https://launchpad.net/pyopenssl
Example usage:
python run.py -t <test_dir>
"""
from optparse import OptionParser
import sys
import playback_driver
import proxy_handler
def Run(options):
driver = playback_driver.PlaybackRequestHandler(options.test_dir)
httpd = proxy_handler.CreateServer(driver, options.port)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
def main():
parser = OptionParser()
parser.add_option("-t", "--test-dir", dest="test_dir",
help="directory containing recorded test data")
parser.add_option("-p", "--port", dest="port", type="int", default=8000)
options = parser.parse_args()[0]
if not options.test_dir:
raise Exception('please specify test directory')
Run(options)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
chidea/GoPythonDLLWrapper | bin/lib/test/pydocfodder.py | 203 | 6332 | """Something just to look at via pydoc."""
import types
class A_classic:
"A classic class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
class B_classic(A_classic):
"A classic class, derived from A_classic."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_classic(A_classic):
"A classic class, derived from A_classic."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_classic(B_classic, C_classic):
"A classic class, derived from B_classic and C_classic."
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class A_new(object):
"A new-style class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def A_classmethod(cls, x):
"A class method defined in A."
A_classmethod = classmethod(A_classmethod)
def A_staticmethod():
"A static method defined in A."
A_staticmethod = staticmethod(A_staticmethod)
def _getx(self):
"A property getter function."
def _setx(self, value):
"A property setter function."
def _delx(self):
"A property deleter function."
A_property = property(fdel=_delx, fget=_getx, fset=_setx,
doc="A sample property defined in A.")
A_int_alias = int
class B_new(A_new):
"A new-style class, derived from A_new."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_new(A_new):
"A new-style class, derived from A_new."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_new(B_new, C_new):
"""A new-style class, derived from B_new and C_new.
"""
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class FunkyProperties(object):
"""From SF bug 472347, by Roeland Rengelink.
Property getters etc may not be vanilla functions or methods,
and this used to make GUI pydoc blow up.
"""
def __init__(self):
self.desc = {'x':0}
class get_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print('Get called', self, inst)
return inst.desc[self.attr]
class set_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst, val):
print('Set called', self, inst, val)
inst.desc[self.attr] = val
class del_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print('Del called', self, inst)
del inst.desc[self.attr]
x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
submodule = types.ModuleType(__name__ + '.submodule',
"""A submodule, which should appear in its parent's summary""")
| mit |
Zopieux/py3status | py3status/modules/xrandr.py | 8 | 13786 | # -*- coding: utf-8 -*-
"""
Control your screen(s) layout easily.
This modules allows you to handle your screens outputs directly from your bar!
- Detect and propose every possible screen combinations
- Switch between combinations using click events and mouse scroll
- Activate the screen or screen combination on a single click
- It will detect any newly connected or removed screen automatically
For convenience, this module also proposes some added features:
- Dynamic parameters for POSITION and WORKSPACES assignment (see below)
- Automatic fallback to a given screen or screen combination when no more
screen is available (handy for laptops)
- Automatically apply this screen combination on start: no need for xorg!
- Automatically move workspaces to screens when they are available
Configuration parameters:
- cache_timeout: how often to (re)detect the outputs
- fallback: when the current output layout is not available anymore,
fallback to this layout if available. This is very handy if you
have a laptop and switched to an external screen for presentation
and want to automatically fallback to your laptop screen when you
disconnect the external screen.
- force_on_start: switch to the given combination mode if available
when the module starts (saves you from having to configure xorg)
- format_clone: string used to display a 'clone' combination
- format_extend: string used to display a 'extend' combination
Dynamic configuration parameters:
- <OUTPUT>_pos: apply the given position to the OUTPUT
Example: DP1_pos = "-2560x0"
Example: DP1_pos = "left-of LVDS1"
Example: DP1_pos = "right-of eDP1"
- <OUTPUT>_workspaces: comma separated list of workspaces to move to
the given OUTPUT when it is activated
Example: DP1_workspaces = "1,2,3"
Example config:
xrandr {
force_on_start = "eDP1+DP1"
DP1_pos = "left-of eDP1"
VGA_workspaces = "7"
}
@author ultrabug
"""
import shlex
from collections import deque
from collections import OrderedDict
from itertools import combinations
from subprocess import call, Popen, PIPE
from syslog import syslog, LOG_INFO
from time import sleep, time
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
fallback = True
fixed_width = True
force_on_start = None
format_clone = '='
format_extend = '+'
def __init__(self):
"""
"""
self.active_comb = None
self.active_layout = None
self.active_mode = 'extend'
self.displayed = None
self.max_width = 0
def _get_layout(self):
"""
Get the outputs layout from xrandr and try to detect the
currently active layout as best as we can on start.
"""
connected = list()
active_layout = list()
disconnected = list()
layout = OrderedDict(
{
'connected': OrderedDict(),
'disconnected': OrderedDict()
}
)
current = Popen(['xrandr'], stdout=PIPE)
for line in current.stdout.readlines():
try:
# python3
line = line.decode()
except:
pass
try:
s = line.split(' ')
if s[1] == 'connected':
output, state = s[0], s[1]
if s[2][0] == '(':
mode, infos = None, ' '.join(s[2:]).strip('\n')
else:
mode, infos = s[2], ' '.join(s[3:]).strip('\n')
active_layout.append(output)
connected.append(output)
elif s[1] == 'disconnected':
output, state = s[0], s[1]
mode, infos = None, ' '.join(s[2:]).strip('\n')
disconnected.append(output)
else:
continue
except Exception as err:
syslog(LOG_INFO, 'xrandr error="{}"'.format(err))
else:
layout[state][output] = {
'infos': infos,
'mode': mode,
'state': state
}
# initialize the active layout
if self.active_layout is None:
self.active_comb = tuple(active_layout)
self.active_layout = self._get_string_and_set_width(
tuple(active_layout),
self.active_mode
)
return layout
def _set_available_combinations(self):
"""
Generate all connected outputs combinations and
set the max display width while iterating.
"""
available_combinations = set()
combinations_map = {}
self.max_width = 0
for output in range(len(self.layout['connected'])+1):
for comb in combinations(self.layout['connected'], output):
if comb:
for mode in ['clone', 'extend']:
string = self._get_string_and_set_width(comb, mode)
if len(comb) == 1:
combinations_map[string] = (comb, None)
else:
combinations_map[string] = (comb, mode)
available_combinations.add(string)
self.available_combinations = deque(available_combinations)
self.combinations_map = combinations_map
def _get_string_and_set_width(self, combination, mode):
"""
Construct the string to be displayed and record the max width.
"""
show = '{}'.format(self._separator(mode)).join(combination)
show = show.rstrip('{}'.format(self._separator(mode)))
self.max_width = max([self.max_width, len(show)])
return show
def _choose_what_to_display(self, force_refresh=False):
"""
Choose what combination to display on the bar.
By default we try to display the active layout on the first run, else
we display the last selected combination.
"""
for _ in range(len(self.available_combinations)):
if (
self.displayed is None and
self.available_combinations[0] == self.active_layout
):
self.displayed = self.available_combinations[0]
break
else:
if self.displayed == self.available_combinations[0]:
break
else:
self.available_combinations.rotate(1)
else:
if force_refresh:
self.displayed = self.available_combinations[0]
else:
syslog(
LOG_INFO,
'xrandr error="displayed combination is not available"'
)
def _center(self, s):
"""
Center the given string on the detected max width.
"""
fmt = '{:^%d}' % self.max_width
return fmt.format(s)
def _apply(self, force=False):
"""
Call xrandr and apply the selected (displayed) combination mode.
"""
if self.displayed == self.active_layout and not force:
# no change, do nothing
return
combination, mode = self.combinations_map.get(
self.displayed, (None, None)
)
if combination is None and mode is None:
# displayed combination cannot be activated, ignore
return
cmd = 'xrandr'
outputs = list(self.layout['connected'].keys())
outputs += list(self.layout['disconnected'].keys())
previous_output = None
for output in outputs:
cmd += ' --output {}'.format(output)
#
if output in combination:
pos = getattr(self, '{}_pos'.format(output), '0x0')
#
if mode == 'clone' and previous_output is not None:
cmd += ' --auto --same-as {}'.format(previous_output)
else:
if 'left-of' in pos:
cmd += ' --auto --{} --rotate normal'.format(pos)
elif 'right-of' in pos:
cmd += ' --auto --{} --rotate normal'.format(pos)
else:
cmd += ' --auto --pos {} --rotate normal'.format(pos)
previous_output = output
else:
cmd += ' --off'
#
code = call(shlex.split(cmd))
if code == 0:
self.active_comb = combination
self.active_layout = self.displayed
self.active_mode = mode
syslog(LOG_INFO, 'command "{}" exit code {}'.format(cmd, code))
# move workspaces to outputs as configured
self._apply_workspaces(combination, mode)
def _apply_workspaces(self, combination, mode):
"""
Allows user to force move a comma separated list of workspaces to the
given output when it's activated.
Example:
- DP1_workspaces = "1,2,3"
"""
if len(combination) > 1 and mode == 'extend':
sleep(3)
for output in combination:
workspaces = getattr(
self, '{}_workspaces'.format(output), '').split(',')
for workspace in workspaces:
if not workspace:
continue
# switch to workspace
cmd = 'i3-msg workspace "{}"'.format(workspace)
call(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
# move it to output
cmd = 'i3-msg move workspace to output "{}"'.format(output)
call(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
# log this
syslog(
LOG_INFO,
'moved workspace {} to output {}'.format(
workspace, output)
)
def _refresh_py3status(self):
"""
Send a SIGUSR1 signal to py3status to force a bar refresh.
"""
call(shlex.split('killall -s USR1 py3status'))
def _fallback_to_available_output(self):
"""
Fallback to the first available output when the active layout
was composed of only one output.
This allows us to avoid cases where you get stuck with a black sreen
on your laptop by switching back to the integrated screen
automatically !
"""
if len(self.active_comb) == 1:
self._choose_what_to_display(force_refresh=True)
self._apply()
self._refresh_py3status()
def _force_force_on_start(self):
"""
Force the user configured mode on start.
"""
if self.force_on_start in self.available_combinations:
self.displayed = self.force_on_start
self.force_on_start = None
self._choose_what_to_display(force_refresh=True)
self._apply(force=True)
self._refresh_py3status()
def _separator(self, mode):
"""
Return the separator for the given mode.
"""
if mode == 'extend':
return self.format_extend
if mode == 'clone':
return self.format_clone
def _switch_selection(self, direction):
self.available_combinations.rotate(direction)
self.displayed = self.available_combinations[0]
def on_click(self, i3s_output_list, i3s_config, event):
"""
Click events
- left click & scroll up/down: switch between modes
- right click: apply selected mode
- middle click: force refresh of available modes
"""
button = event['button']
if button == 4:
self._switch_selection(-1)
if button in [1, 5]:
self._switch_selection(1)
if button == 2:
self._choose_what_to_display(force_refresh=True)
if button == 3:
self._apply()
def xrandr(self, i3s_output_list, i3s_config):
"""
This is the main py3status method, it will orchestrate what's being
displayed on the bar.
"""
self.layout = self._get_layout()
self._set_available_combinations()
self._choose_what_to_display()
if self.fixed_width is True:
full_text = self._center(self.displayed)
else:
full_text = self.displayed
response = {
'cached_until': time() + self.cache_timeout,
'full_text': full_text
}
# coloration
if self.displayed == self.active_layout:
response['color'] = i3s_config['color_good']
elif self.displayed not in self.available_combinations:
response['color'] = i3s_config['color_bad']
# force default layout setup
if self.force_on_start is not None:
sleep(1)
self._force_force_on_start()
# fallback detection
if self.active_layout not in self.available_combinations:
response['color'] = i3s_config['color_degraded']
if self.fallback is True:
self._fallback_to_available_output()
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
x = Py3status()
config = {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00'
}
while True:
print(x.xrandr([], config))
sleep(1)
| bsd-3-clause |
HonzaKral/django | django/contrib/sessions/backends/cached_db.py | 24 | 2865 | """
Cached, database-backed sessions.
"""
import logging
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.cache import caches
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
KEY_PREFIX = "django.contrib.sessions.cached_db"
class SessionStore(DBStore):
"""
Implements cached, database backed sessions.
"""
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
data = None
if data is None:
# Duplicate DBStore.load, because we need to keep track
# of the expiry date to set it properly in the cache.
try:
s = Session.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
data = self.decode(s.session_data)
self._cache.set(self.cache_key, data,
self.get_expiry_age(expiry=s.expire_date))
except (Session.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
data = {}
return data
def exists(self, session_key):
if (KEY_PREFIX + session_key) in self._cache:
return True
return super(SessionStore, self).exists(session_key)
def save(self, must_create=False):
super(SessionStore, self).save(must_create)
self._cache.set(self.cache_key, self._session, self.get_expiry_age())
def delete(self, session_key=None):
super(SessionStore, self).delete(session_key)
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(KEY_PREFIX + session_key)
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete(self.session_key)
self._session_key = None
# At bottom to avoid circular import
from django.contrib.sessions.models import Session # isort:skip
| bsd-3-clause |
samba-team/samba | python/samba/tests/smb.py | 2 | 9701 | # -*- coding: utf-8 -*-
# Unix SMB/CIFS implementation. Tests for smb manipulation
# Copyright (C) David Mulder <dmulder@suse.com> 2018
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import samba
import os
import random
import sys
from samba import NTSTATUSError
from samba.ntstatus import (NT_STATUS_OBJECT_NAME_NOT_FOUND,
NT_STATUS_OBJECT_PATH_NOT_FOUND)
from samba.samba3 import libsmb_samba_internal as libsmb
from samba.samba3 import param as s3param
PY3 = sys.version_info[0] == 3
realm = os.environ.get('REALM')
domain_dir = realm.lower() + '/'
test_contents = 'abcd' * 256
utf_contents = u'Süßigkeiten Äpfel ' * 128
test_literal_bytes_embed_nulls = b'\xff\xfe\x14\x61\x00\x00\x62\x63\x64' * 256
binary_contents = b'\xff\xfe'
binary_contents = binary_contents + "Hello cruel world of python3".encode('utf8') * 128
test_dir = os.path.join(domain_dir, 'testing_%d' % random.randint(0, 0xFFFF))
test_file = os.path.join(test_dir, 'testing').replace('/', '\\')
class SMBTests(samba.tests.TestCase):
def setUp(self):
super(SMBTests, self).setUp()
self.server = os.environ["SERVER"]
creds = self.insta_creds(template=self.get_credentials())
# create an SMB connection to the server
lp = s3param.get_context()
lp.load(os.getenv("SMB_CONF_PATH"))
self.smb_conn = libsmb.Conn(self.server, "sysvol", lp, creds)
self.smb_conn.mkdir(test_dir)
def tearDown(self):
super(SMBTests, self).tearDown()
try:
self.smb_conn.deltree(test_dir)
except:
pass
def test_list(self):
# check a basic listing returns the items we expect
ls = [f['name'] for f in self.smb_conn.list(domain_dir)]
self.assertIn('scripts', ls,
msg='"scripts" directory not found in sysvol')
self.assertIn('Policies', ls,
msg='"Policies" directory not found in sysvol')
self.assertNotIn('..', ls,
msg='Parent (..) found in directory listing')
self.assertNotIn('.', ls,
msg='Current dir (.) found in directory listing')
# using a '*' mask should be the same as using no mask
ls_wildcard = [f['name'] for f in self.smb_conn.list(domain_dir, "*")]
self.assertEqual(ls, ls_wildcard)
# applying a mask should only return items that match that mask
ls_pol = [f['name'] for f in self.smb_conn.list(domain_dir, "Pol*")]
expected = ["Policies"]
self.assertEqual(ls_pol, expected)
# each item in the listing is a has with expected keys
expected_keys = ['attrib', 'mtime', 'name', 'short_name', 'size']
for item in self.smb_conn.list(domain_dir):
for key in expected_keys:
self.assertIn(key, item,
msg="Key '%s' not in listing '%s'" % (key, item))
def test_deltree(self):
"""The smb.deltree API should delete files and sub-dirs"""
# create some test sub-dirs
dirpaths = []
empty_dirs = []
cur_dir = test_dir
for subdir in ["subdir-X", "subdir-Y", "subdir-Z"]:
path = self.make_sysvol_path(cur_dir, subdir)
self.smb_conn.mkdir(path)
dirpaths.append(path)
cur_dir = path
# create another empty dir just for kicks
path = self.make_sysvol_path(cur_dir, "another")
self.smb_conn.mkdir(path)
empty_dirs.append(path)
# create some files in these directories
filepaths = []
for subdir in dirpaths:
for i in range(1, 4):
contents = "I'm file {0} in dir {1}!".format(i, subdir)
path = self.make_sysvol_path(subdir, "file-{0}.txt".format(i))
self.smb_conn.savefile(path, test_contents.encode('utf8'))
filepaths.append(path)
# sanity-check these dirs/files exist
for subdir in dirpaths + empty_dirs:
self.assertTrue(self.smb_conn.chkpath(subdir),
"Failed to create {0}".format(subdir))
for path in filepaths:
self.assertTrue(self.file_exists(path),
"Failed to create {0}".format(path))
# try using deltree to remove a single empty directory
path = empty_dirs.pop(0)
self.smb_conn.deltree(path)
self.assertFalse(self.smb_conn.chkpath(path),
"Failed to delete {0}".format(path))
# try using deltree to remove a single file
path = filepaths.pop(0)
self.smb_conn.deltree(path)
self.assertFalse(self.file_exists(path),
"Failed to delete {0}".format(path))
# delete the top-level dir
self.smb_conn.deltree(test_dir)
# now check that all the dirs/files are no longer there
for subdir in dirpaths + empty_dirs:
self.assertFalse(self.smb_conn.chkpath(subdir),
"Failed to delete {0}".format(subdir))
for path in filepaths:
self.assertFalse(self.file_exists(path),
"Failed to delete {0}".format(path))
def file_exists(self, filepath):
"""Returns whether a regular file exists (by trying to open it)"""
try:
self.smb_conn.loadfile(filepath)
exists = True;
except NTSTATUSError as err:
if (err.args[0] == NT_STATUS_OBJECT_NAME_NOT_FOUND or
err.args[0] == NT_STATUS_OBJECT_PATH_NOT_FOUND):
exists = False
else:
raise err
return exists
def test_unlink(self):
"""
The smb.unlink API should delete file
"""
# create the test file
self.assertFalse(self.file_exists(test_file))
self.smb_conn.savefile(test_file, binary_contents)
self.assertTrue(self.file_exists(test_file))
# delete it and check that it's gone
self.smb_conn.unlink(test_file)
self.assertFalse(self.file_exists(test_file))
def test_chkpath(self):
"""Tests .chkpath determines whether or not a directory exists"""
self.assertTrue(self.smb_conn.chkpath(test_dir))
# should return False for a non-existent directory
bad_dir = self.make_sysvol_path(test_dir, 'dont_exist')
self.assertFalse(self.smb_conn.chkpath(bad_dir))
# should return False for files (because they're not directories)
self.smb_conn.savefile(test_file, binary_contents)
self.assertFalse(self.smb_conn.chkpath(test_file))
# check correct result after creating and then deleting a new dir
new_dir = self.make_sysvol_path(test_dir, 'test-new')
self.smb_conn.mkdir(new_dir)
self.assertTrue(self.smb_conn.chkpath(new_dir))
self.smb_conn.rmdir(new_dir)
self.assertFalse(self.smb_conn.chkpath(new_dir))
def test_save_load_text(self):
self.smb_conn.savefile(test_file, test_contents.encode('utf8'))
contents = self.smb_conn.loadfile(test_file)
self.assertEqual(contents.decode('utf8'), test_contents,
msg='contents of test file did not match what was written')
# check we can overwrite the file with new contents
new_contents = 'wxyz' * 128
self.smb_conn.savefile(test_file, new_contents.encode('utf8'))
contents = self.smb_conn.loadfile(test_file)
self.assertEqual(contents.decode('utf8'), new_contents,
msg='contents of test file did not match what was written')
# with python2 this will save/load str type (with embedded nulls)
# with python3 this will save/load bytes type
def test_save_load_string_bytes(self):
self.smb_conn.savefile(test_file, test_literal_bytes_embed_nulls)
contents = self.smb_conn.loadfile(test_file)
self.assertEqual(contents, test_literal_bytes_embed_nulls,
msg='contents of test file did not match what was written')
# python3 only this will save/load unicode
def test_save_load_utfcontents(self):
if PY3:
self.smb_conn.savefile(test_file, utf_contents.encode('utf8'))
contents = self.smb_conn.loadfile(test_file)
self.assertEqual(contents.decode('utf8'), utf_contents,
msg='contents of test file did not match what was written')
# with python2 this will save/load str type
# with python3 this will save/load bytes type
def test_save_binary_contents(self):
self.smb_conn.savefile(test_file, binary_contents)
contents = self.smb_conn.loadfile(test_file)
self.assertEqual(contents, binary_contents,
msg='contents of test file did not match what was written')
def make_sysvol_path(self, dirpath, filename):
# return the dir + filename as a sysvol path
return os.path.join(dirpath, filename).replace('/', '\\')
| gpl-3.0 |
hellhovnd/tennisranking | tennisranking/project/wsgi.py | 1 | 1449 | # -*- coding:utf-8 -*-
'''
WSGI config for tennisranking project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
'''
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "tennisranking.settings"
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-2-clause |
NLHEALTHCARE/PYELT | tests/old/unit_tests_rob/test03r_domain.py | 1 | 1900 | import unittest
from tests.unit_tests_rob import _domain_rob, _domain_rob_unittest
from tests.unit_tests_rob.global_test_suite import get_global_test_pipeline, execute_sql, init_db
class TestCase_Domain(unittest.TestCase):
def setUp(self):
self.pipeline = get_global_test_pipeline()
self.pipe = self.pipeline.get_or_create_pipe('test_system')
p = self.pipe
def test01_domain_registration(self):
self.pipe.register_domain(_domain_rob)
self.pipeline.run()
self.assertNotEquals(len(self.pipe.domain_modules), 0)
# def test02_domain_number_of_attributes_correct(self):
# sql = """SELECT COUNT(COLUMN_NAME) FROM INFORMATION_SCHEMA.COLUMNS
# WHERE TABLE_CATALOG = 'pyelt_unittests' AND TABLE_SCHEMA = 'dv'
# AND TABLE_NAME = 'zorgverlener_sat_personalia'"""
# result = execute_sql(sql)
# result = result[0][0]
# self.assertEqual(result, 14, 'Ik verwachte 14 kolommen; is domain_rob misschien recent veranderd?')
# nieuw attribute toegevoegd aan zorgverlener_sat_personalia:
def test03_domain_new_attribute_added(self):
self.pipe.register_domain(_domain_rob_unittest)
self.pipeline.run()
sql = """SELECT COUNT(COLUMN_NAME) FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_CATALOG = 'pyelt_unittests' AND TABLE_SCHEMA = 'dv'
AND TABLE_NAME = 'zorgverlener_sat_personalia'"""
result = execute_sql(sql)
result = result[0][0]
self.assertEqual(result,15, 'Ik verwachte 15 kolommen; is domain_rob_unittest misschien recent veranderd?')
# # #
if __name__ == '__main__':
init_db()
unittest.main()
# todo[rob]: bovenstaande is gewijzigd. Je kunt nu wel attributen toevoegen aan een sat. Kun je de test herschrijven zodat hij kijkt of er een nieuw veld bij is gekomen?
| gpl-3.0 |
loopCM/chromium | chrome/test/functional/special_tabs.py | 59 | 13394 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class SpecialTabsTest(pyauto.PyUITest):
  """TestCase for Special Tabs like about:version, chrome://history, etc."""

  @staticmethod
  def GetSpecialAcceleratorTabs():
    """Get a dict of accelerators and corresponding tab titles."""
    ret = {
        pyauto.IDC_SHOW_HISTORY: 'History',
        pyauto.IDC_MANAGE_EXTENSIONS: 'Extensions',
        pyauto.IDC_SHOW_DOWNLOADS: 'Downloads',
    }
    return ret

  # about: URLs that should redirect to an equivalent chrome:// page.
  special_url_redirects = {
      'about:': 'chrome://version',
      'about:about': 'chrome://about',
      'about:appcache-internals': 'chrome://appcache-internals',
      'about:credits': 'chrome://credits',
      'about:dns': 'chrome://dns',
      'about:histograms': 'chrome://histograms',
      'about:plugins': 'chrome://plugins',
      'about:sync': 'chrome://sync-internals',
      'about:sync-internals': 'chrome://sync-internals',
      'about:version': 'chrome://version',
  }

  # Baseline special URLs and their expected properties.  Recognized keys:
  # 'title' (expected tab title; defaults to the URL), 'CSP' (False means the
  # page is exempt from Content-Security-Policy checks), 'includes'/'excludes'
  # (strings that must / must not appear in the page content).
  special_url_tabs = {
      'chrome://about': {'title': 'Chrome URLs'},
      'chrome://appcache-internals': {'title': 'AppCache Internals'},
      'chrome://blob-internals': {'title': 'Blob Storage Internals'},
      'chrome://feedback': {},
      'chrome://chrome-urls': {'title': 'Chrome URLs'},
      'chrome://crashes': {'title': 'Crashes'},
      'chrome://credits': {'title': 'Credits'},
      'chrome://downloads': {'title': 'Downloads'},
      'chrome://dns': {'title': 'About DNS'},
      'chrome://extensions': {'title': 'Extensions'},
      'chrome://flags': {},
      'chrome://flash': {},
      'chrome://gpu-internals': {},
      'chrome://histograms': {'title': 'About Histograms'},
      'chrome://history': {'title': 'History'},
      'chrome://inspect': {'title': 'Inspect with Chrome Developer Tools'},
      'chrome://media-internals': {'title': 'Media Internals'},
      'chrome://memory-redirect': {'title': 'About Memory'},
      'chrome://net-internals': {},
      'chrome://net-internals/help.html': {},
      'chrome://newtab': {'title': 'New Tab', 'CSP': False},
      'chrome://plugins': {'title': 'Plug-ins'},
      'chrome://settings': {'title': 'Settings'},
      'chrome://settings/autofill': {'title': 'Settings - Autofill settings'},
      'chrome://settings/clearBrowserData':
          {'title': 'Settings - Clear browsing data'},
      'chrome://settings/content': {'title': 'Settings - Content settings'},
      'chrome://settings/languages':
          {'title': 'Settings - Languages'},
      'chrome://settings/passwords': {'title': 'Settings - Passwords'},
      'chrome://stats': {},
      'chrome://sync': {'title': 'Sync Internals'},
      'chrome://sync-internals': {'title': 'Sync Internals'},
      'chrome://terms': {},
      'chrome://version': {'title': 'About Version'},
      'chrome://view-http-cache': {},
      'chrome://webrtc-internals': {'title': 'WebRTC Internals'},
  }
  broken_special_url_tabs = {
      # crashed under debug when invoked from location bar (bug 88223).
      'chrome://devtools': {'CSP': False},
      # returns "not available" despite having an URL constant.
      'chrome://dialog': {'CSP': False},
      # separate window on mac, PC untested, not implemented elsewhere.
      'chrome://ipc': {'CSP': False},
      # race against redirects via meta-refresh.
      'chrome://memory': {'CSP': False},
  }

  chromeos_special_url_tabs = {
      'chrome://choose-mobile-network': {'title': 'undefined', 'CSP': True},
      'chrome://flags': {'CSP': True},
      'chrome://imageburner': {'title': 'Create a Recovery Media', 'CSP': True},
      'chrome://keyboardoverlay': {'title': 'Keyboard Overlay', 'CSP': True},
      'chrome://network': {'title': 'About Network'},
      'chrome://os-credits': {'title': 'Credits', 'CSP': False},
      'chrome://proxy-settings': {'CSP': False},
      'chrome://register': {'CSP': False},
      'chrome://settings/languages':
          {'title': 'Settings - Languages and input'},
      'chrome://sim-unlock': {'title': 'Enter SIM card PIN', 'CSP': False},
      'chrome://system': {'title': 'About System', 'CSP': False},
      # OVERRIDE - title and page different on CrOS
      'chrome://settings/accounts': {'title': 'Settings - Users'},
  }
  broken_chromeos_special_url_tabs = {
      # returns "not available" page on chromeos=1 linux but has an URL constant.
      'chrome://activationmessage': {'CSP': False},
      'chrome://cloudprintresources': {'CSP': False},
      'chrome://cloudprintsetup': {'CSP': False},
      'chrome://collected-cookies': {'CSP': False},
      'chrome://constrained-test': {'CSP': False},
      'chrome://enterprise-enrollment': {'CSP': False},
      'chrome://http-auth': {'CSP': False},
      'chrome://login-container': {'CSP': False},
      'chrome://media-player': {'CSP': False},
      'chrome://screenshots': {'CSP': False},
      'chrome://slideshow': {'CSP': False},
      'chrome://syncresources': {'CSP': False},
      'chrome://theme': {'CSP': False},
      'chrome://view-http-cache': {'CSP': False},
      # crashes on chromeos=1 on linux, possibly missing real CrOS features.
      'chrome://cryptohome': {'CSP': False},
      'chrome://mobilesetup': {'CSP': False},
      'chrome://print': {'CSP': False},
  }

  linux_special_url_tabs = {
      'chrome://linux-proxy-config': {'title': 'Proxy Configuration Help'},
      'chrome://tcmalloc': {'title': 'tcmalloc stats'},
      'chrome://sandbox': {'title': 'Sandbox Status'},
  }
  broken_linux_special_url_tabs = {}

  mac_special_url_tabs = {
      'chrome://settings/languages': {'title': 'Settings - Languages'},
  }
  broken_mac_special_url_tabs = {}

  win_special_url_tabs = {
      'chrome://conflicts': {},
  }
  broken_win_special_url_tabs = {
      # Sync on windows badly broken at the moment.
      'chrome://sync': {},
  }

  google_special_url_tabs = {
      # OVERRIDE - different title for Google Chrome vs. Chromium.
      'chrome://terms': {
          'title': 'Google Chrome Terms of Service',
      },
  }
  broken_google_special_url_tabs = {}

  google_chromeos_special_url_tabs = {
      # OVERRIDE - different title for Google Chrome OS vs. Chromium OS.
      'chrome://terms': {
          'title': 'Google Chrome OS Terms',
      },
  }
  broken_google_chromeos_special_url_tabs = {}

  google_win_special_url_tabs = {}
  broken_google_win_special_url_tabs = {}
  google_mac_special_url_tabs = {}
  broken_google_mac_special_url_tabs = {}
  google_linux_special_url_tabs = {}
  broken_google_linux_special_url_tabs = {}

  def _VerifyAppCacheInternals(self):
    """Confirm about:appcache-internals contains expected content for Caches.

    Also confirms that the about page populates Application Caches."""
    # Navigate to html page to activate DNS prefetching.
    self.NavigateToURL('http://futtta.be/html5/offline.php')
    # Wait for page to load and display sucess or fail message.
    self.WaitUntil(
        lambda: self.GetDOMValue('document.getElementById("status").innerHTML'),
        expect_retval='cached')
    self.TabGoBack()
    test_utils.StringContentCheck(
        self, self.GetTabContents(),
        ['Manifest',
         'http://futtta.be/html5/manifest.php'],
        [])

  def _VerifyAboutDNS(self):
    """Confirm about:dns contains expected content related to DNS info.

    Also confirms that prefetching DNS records propogate."""
    # Navigate to a page to activate DNS prefetching.
    self.NavigateToURL('http://www.google.com')
    self.TabGoBack()
    test_utils.StringContentCheck(self, self.GetTabContents(),
                                  ['Host name', 'How long ago', 'Motivation'],
                                  [])

  def _GetPlatformSpecialURLTabs(self):
    """Build the dict of special URLs applicable to the current platform.

    Starts from the generic tables, layers platform-specific entries on
    top, then removes everything listed as broken; repeats the process
    for Google-branded builds.  Returns {url: properties}.
    """
    tabs = self.special_url_tabs.copy()
    broken_tabs = self.broken_special_url_tabs.copy()
    if self.IsChromeOS():
      tabs.update(self.chromeos_special_url_tabs)
      broken_tabs.update(self.broken_chromeos_special_url_tabs)
    elif self.IsLinux():
      tabs.update(self.linux_special_url_tabs)
      broken_tabs.update(self.broken_linux_special_url_tabs)
    elif self.IsMac():
      tabs.update(self.mac_special_url_tabs)
      broken_tabs.update(self.broken_mac_special_url_tabs)
    elif self.IsWin():
      tabs.update(self.win_special_url_tabs)
      broken_tabs.update(self.broken_win_special_url_tabs)
    for key, value in broken_tabs.iteritems():
      if key in tabs:
        del tabs[key]
    # Reset so only Google-branded breakage applies below.
    broken_tabs = {}
    if self.GetBrowserInfo()['properties']['branding'] == 'Google Chrome':
      tabs.update(self.google_special_url_tabs)
      broken_tabs.update(self.broken_google_special_url_tabs)
      if self.IsChromeOS():
        tabs.update(self.google_chromeos_special_url_tabs)
        broken_tabs.update(self.broken_google_chromeos_special_url_tabs)
      elif self.IsLinux():
        tabs.update(self.google_linux_special_url_tabs)
        broken_tabs.update(self.broken_google_linux_special_url_tabs)
      elif self.IsMac():
        tabs.update(self.google_mac_special_url_tabs)
        broken_tabs.update(self.broken_google_mac_special_url_tabs)
      elif self.IsWin():
        tabs.update(self.google_win_special_url_tabs)
        broken_tabs.update(self.broken_google_win_special_url_tabs)
      for key, value in broken_tabs.iteritems():
        if key in tabs:
          del tabs[key]
    return tabs

  def testSpecialURLRedirects(self):
    """Test that older about: URLs are implemented by newer chrome:// URLs.

    The location bar may not get updated in all cases, so checking the
    tab URL is misleading, instead check for the same contents as the
    chrome:// page."""
    tabs = self._GetPlatformSpecialURLTabs()
    for url, redirect in self.special_url_redirects.iteritems():
      if redirect in tabs:
        logging.debug('Testing redirect from %s to %s.' % (url, redirect))
        self.NavigateToURL(url)
        self.assertEqual(self.special_url_tabs[redirect]['title'],
                         self.GetActiveTabTitle())

  def testSpecialURLTabs(self):
    """Test special tabs created by URLs like chrome://downloads,
    chrome://settings/extensionSettings, chrome://history etc.

    Also ensures they specify content-security-policy and not inline
    scripts for those pages that are expected to do so. Patches which
    break this test by including new inline javascript are security
    vulnerabilities and should be reverted."""
    tabs = self._GetPlatformSpecialURLTabs()
    for url, properties in tabs.iteritems():
      logging.debug('Testing URL %s.' % url)
      self.NavigateToURL(url)
      # Equivalent to the old "cond and a or b" idiom: a missing or
      # falsy title falls back to the URL itself.
      expected_title = properties.get('title') or url
      self.assertTrue(self.WaitUntil(
          lambda: self.GetActiveTabTitle(), expect_retval=expected_title),
          msg='Title did not match for %s. Expected: %s. Got %s' % (
              url, expected_title, self.GetActiveTabTitle()))
      include_list = []
      exclude_list = []
      no_csp = 'CSP' in properties and not properties['CSP']
      if no_csp:
        exclude_list.extend(['Content-Security-Policy'])
      else:
        exclude_list.extend(['<script>', 'onclick=', 'onload=',
                             'onchange=', 'onsubmit=', 'javascript:'])
      if 'includes' in properties:
        include_list.extend(properties['includes'])
      if 'excludes' in properties:
        # Bug fix: this previously read properties['exlcudes'], which
        # would raise KeyError for any entry that defines 'excludes'.
        exclude_list.extend(properties['excludes'])
      test_utils.StringContentCheck(self, self.GetTabContents(),
                                    include_list, exclude_list)
      # Verify CSP at runtime: injected inline script must not execute
      # on CSP-protected pages.
      result = self.ExecuteJavascript("""
          var r = 'blocked';
          var f = 'executed';
          var s = document.createElement('script');
          s.textContent = 'r = f';
          document.body.appendChild(s);
          window.domAutomationController.send(r);
          """)
      logging.debug('has csp %s, result %s.' % (not no_csp, result))
      if no_csp:
        self.assertEqual(result, 'executed',
                         msg='Got %s for %s' % (result, url))
      else:
        self.assertEqual(result, 'blocked',
                         msg='Got %s for %s' % (result, url))
      # Restart browser so that every URL gets a fresh instance.
      self.RestartBrowser(clear_profile=True)

  def testAboutAppCacheTab(self):
    """Test App Cache tab to confirm about page populates caches."""
    self.NavigateToURL('about:appcache-internals')
    self._VerifyAppCacheInternals()
    self.assertEqual('AppCache Internals', self.GetActiveTabTitle())

  def testAboutDNSTab(self):
    """Test DNS tab to confirm DNS about page propogates records."""
    self.NavigateToURL('about:dns')
    self._VerifyAboutDNS()
    self.assertEqual('About DNS', self.GetActiveTabTitle())

  # NOTE(review): method name has a typo ("Accerator"); kept as-is since
  # renaming would change which tests runners discover by name.
  def testSpecialAcceratorTabs(self):
    """Test special tabs created by accelerators."""
    for accel, title in self.GetSpecialAcceleratorTabs().iteritems():
      self.RunCommand(accel)
      self.assertTrue(self.WaitUntil(
          self.GetActiveTabTitle, expect_retval=title),
          msg='Expected "%s", got "%s"' % (title, self.GetActiveTabTitle()))
if __name__ == '__main__':
    # Delegate to the pyauto functional test runner.
    pyauto_functional.Main()
| bsd-3-clause |
emory-libraries/ddi-search | ddisearch/ddi/tests.py | 1 | 27334 | # file ddisearch/ddi/tests.py
#
# Copyright 2014 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import copy
import datetime
import shutil
from StringIO import StringIO
import tempfile
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.management.base import CommandError
from django.test import TestCase
from django.test.utils import override_settings
from eulxml.xmlmap import load_xmlobject_from_file
from eulexistdb import testutil as eulexistdb_testutil
from eulexistdb.db import ExistDB
from ddisearch.ddi import models as ddixml
from ddisearch.ddi.forms import KeywordSearch
from ddisearch.ddi.management.commands import load
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
class CodeBookTest(TestCase):
    """Tests for the ddixml.CodeBook XmlObject mappings against a fixture."""

    def setUp(self):
        # parse the ICPSR 02988 fixture once per test
        self.cb = load_xmlobject_from_file(os.path.join(FIXTURE_DIR, '02988.xml'),
                                           ddixml.CodeBook)

    def test_properties(self):
        """Every mapped CodeBook field picks up the expected fixture value."""
        # NOTE: deprecated self.assert_ / assertEquals aliases replaced with
        # their canonical unittest spellings; assertions are otherwise unchanged.
        self.assertEqual('Israeli Election Study, 1973', self.cb.title)
        self.assertTrue(self.cb.abstract.startswith('This study is one in a series of election studies conducted since 1969 by Alan Arian'))
        self.assertTrue(isinstance(self.cb.id, ddixml.IDNumber))
        self.assertEqual("2988", self.cb.id.val)
        self.assertEqual("ICPSR", self.cb.id.agency)
        self.assertEqual("2009-03-02", self.cb.document_version.date)
        self.assertEqual(2, len(self.cb.authors))
        self.assertTrue(isinstance(self.cb.authors[0], ddixml.Author))
        author_list = [unicode(a) for a in self.cb.authors]
        self.assertTrue('Arian, Asher' in author_list)
        self.assertTrue('Turgovnik, Ephraim' in author_list)
        self.assertEqual('Tel-Aviv University. Department of Political Science',
                         self.cb.authors[0].affiliation)
        self.assertEqual(35, len(self.cb.keywords))
        self.assertTrue('Arab Israeli conflict' in self.cb.keywords)
        self.assertTrue('social attitudes' in self.cb.keywords)
        self.assertEqual(4, len(self.cb.topics))
        self.assertTrue(isinstance(self.cb.topics[0], ddixml.Topic))
        topics = [unicode(t) for t in self.cb.topics]
        self.assertTrue('Mass Political Behavior and Attitudes' in topics[0])
        self.assertEqual('archive', self.cb.topics[0].source)
        # time periods
        self.assertEqual(5, len(self.cb.time_periods))
        self.assertTrue(isinstance(self.cb.time_periods[0], ddixml.Date))
        self.assertEqual('single', self.cb.time_periods[0].event)
        self.assertEqual('1973', self.cb.time_periods[0].date)
        self.assertEqual('P1', self.cb.time_periods[0].cycle)
        self.assertEqual('start', self.cb.time_periods[1].event)
        # collection dates
        self.assertEqual(7, len(self.cb.collection_dates))
        self.assertTrue(isinstance(self.cb.collection_dates[0], ddixml.Date))
        self.assertEqual('single', self.cb.collection_dates[0].event)
        self.assertEqual('1973-05', self.cb.collection_dates[0].date)
        self.assertEqual('P1', self.cb.collection_dates[0].cycle)
        self.assertEqual('First pre-war', self.cb.collection_dates[0].label)
        # nations
        self.assertEqual(1, len(self.cb.nations))
        self.assertTrue(isinstance(self.cb.nations[0], ddixml.Nation))
        self.assertEqual('Please see geographic coverage.', self.cb.nations[0].val)
        # geo coverage
        self.assertEqual(2, len(self.cb.geo_coverage))
        self.assertEqual('Israel', unicode(self.cb.geo_coverage[0]))
        self.assertEqual('Global', unicode(self.cb.geo_coverage[1]))
        self.assertEqual('individual', self.cb.analysis_unit[0])
        self.assertTrue(self.cb.universe[0].startswith('Urban adult Jewish population'))
        self.assertEqual('survey data', self.cb.kind_of_data[0])
        # methodology section
        method = self.cb.methods[0]
        self.assertTrue(isinstance(method, ddixml.Method))
        datacoll = method.data_collection[0]
        self.assertTrue(isinstance(datacoll, ddixml.DataCollection))
        self.assertTrue('face-to-face interview' in datacoll.sources[0].data_sources)
        self.assertTrue('DDEF.ICPSR/ FREQ.ICPSR/' in datacoll.cleaning[0])
        self.assertTrue('carried out by the Israeli Institute' in method.notes[0])
        # NOTE: fixture doesn't include many of the sections mapped in the models
        # data access section
        access = self.cb.data_access[0]
        self.assertTrue(isinstance(access, ddixml.DataAccess))
        # availabilty
        self.assertTrue(isinstance(access.availability[0], ddixml.DataAvailability))
        avail = access.availability[0]
        self.assertEqual('online', avail.media)
        self.assertEqual('http://www.icpsr.umich.edu', avail.access_uri)
        self.assertTrue('Ann Arbor, Mi.: Inter-university Consortium' in avail.access_place)
        self.assertEqual('SAS SPSS STATA', avail.collection_size[0])
        self.assertTrue('Stata system file(s), sav Data file(s)' in avail.notes[0])
        # usage
        self.assertTrue(isinstance(access.use, ddixml.UseStatement))
        self.assertTrue('Additional special permissions, where applicable' in access.use.special_perms)
        self.assertTrue('read the terms of use below' in access.use.conditions)
        self.assertTrue('original collector of the data, ICPSR,' in access.use.disclaimer)
        # file descriptions
        self.assertEqual(5, len(self.cb.file_descriptions))
        fd1 = self.cb.file_descriptions[0]
        self.assertTrue(isinstance(fd1, ddixml.FileDescription))
        self.assertEqual('F1', fd1.id)
        self.assertEqual('Part1', fd1.files[0].id)
        self.assertEqual('May 1973 Survey', fd1.files[0].name)

    def test_dates(self):
        """dates collapses start/end cycles into 'start - end' display strings."""
        dates = self.cb.dates
        # one single date and two sets of date ranges
        self.assertEqual(3, len(dates))
        self.assertEqual('1973', dates[0])  # single date
        self.assertEqual('1974-01-13 - 1974-01-15', dates[1])
        self.assertEqual('1973-12-19 - 1973-12-20', dates[2])

    def test_doi(self):
        """doi and doi_url are derived from the bibliographic citation."""
        self.assertEqual('doi:10.3886/ICPSR02988', self.cb.doi)
        self.assertEqual('http://dx.doi.org/10.3886/ICPSR02988', self.cb.doi_url)
        # NOTE(review): 'bibligraphic_citation' looks misspelled but presumably
        # matches the attribute name on ddixml.CodeBook -- confirm against the
        # model before "correcting" the spelling here.
        self.cb.bibligraphic_citation = 'http://doi.org/10.3886/ICPSR34941.v1'
        self.cb._doi = None  # clear cached value so doi is recomputed
        self.assertEqual('doi:10.3886/ICPSR34941.v1', self.cb.doi)
        self.assertEqual('http://dx.doi.org/10.3886/ICPSR34941.v1', self.cb.doi_url)
class KeywordSearchTest(TestCase):
    """Unit tests for the KeywordSearch form: validation rules and the
    collected-search-terms property."""

    def test_validation(self):
        """Form is valid iff at least one search-term field is supplied."""
        # completely empty submission is invalid
        self.assertFalse(KeywordSearch({}).is_valid())
        # required display options alone (no search terms) are still invalid
        base = {'sort': 'relevance', 'per_page': 10}
        self.assertFalse(KeywordSearch(base).is_valid())
        # any single search-term field makes the form valid
        for term_field in ('keyword', 'title', 'summary', 'source'):
            data = dict(base, **{term_field: 'term'})
            self.assertTrue(KeywordSearch(data).is_valid())
        # supplying every search term at once is also valid
        data = dict(base, keyword='term1', title='term2', summary='term3',
                    source='term4')
        self.assertTrue(KeywordSearch(data).is_valid())

    def test_all_search_terms(self):
        """all_search_terms joins whichever term fields were submitted."""
        data = {'sort': 'relevance', 'per_page': 10}
        # without validation there are no cleaned terms to report
        self.assertEqual('', KeywordSearch(data).all_search_terms)

        data['keyword'] = 'horse presidential'
        form = KeywordSearch(data)
        form.is_valid()  # validation required to get search terms
        self.assertEqual(data['keyword'], form.all_search_terms)

        data.update({'title': '1973', 'source': 'michigan'})
        form = KeywordSearch(data)
        form.is_valid()
        expected = ' '.join([data['keyword'], data['title'], data['source']])
        self.assertEqual(expected, form.all_search_terms)

        data.update({'summary': 'election'})
        form = KeywordSearch(data)
        form.is_valid()
        expected = ' '.join([data['keyword'], data['title'],
                             data['summary'], data['source']])
        self.assertEqual(expected, form.all_search_terms)
class ViewsTest(eulexistdb_testutil.TestCase):
    """Integration tests for site views, run against an eXist test collection
    loaded with the fixtures in FIXTURE_DIR."""
    exist_fixtures = {
        'directory': FIXTURE_DIR,
        'index': settings.EXISTDB_INDEX_CONFIGFILE  # required for fulltext search
    }
    fixture_filename = '02988.xml'

    def setUp(self):
        # load fixture xml for access to content
        self.cb = load_xmlobject_from_file(os.path.join(FIXTURE_DIR,
                                                        self.fixture_filename),
                                           ddixml.CodeBook)

    def test_site_index(self):
        """Home page shows a search form and lists recently-added resources."""
        index_url = reverse('site-index')
        response = self.client.get(index_url)
        self.assertTrue('form' in response.context)
        self.assertContains(response, "No resources have been added since ",
            msg_prefix='fixture data results in no new items on home page')
        # Test showing a new object by updating the date in our fixture
        # and reloading to exist
        self.cb.document_version.date = datetime.date.today()
        dbpath = settings.EXISTDB_ROOT_COLLECTION + "/" + self.fixture_filename
        db = ExistDB()
        db.load(self.cb.serialize(), dbpath, overwrite=True)
        response = self.client.get(index_url)
        self.assertContains(response, self.cb.title,
            msg_prefix='title of new resource should be displayed on home page')
        self.assertContains(response,
            reverse('ddi:resource', kwargs={'id': self.cb.id.val,
                                            'agency': self.cb.id.agency}),
            msg_prefix='link to new resource should be included on home page')

    def test_search(self):
        """Keyword/title/summary/source searches return the expected results."""
        # no search terms
        search_url = reverse('ddi:search')
        response = self.client.get(search_url, {'keyword': ''})
        self.assertTrue('form' in response.context)
        self.assertTrue(isinstance(response.context['form'], KeywordSearch))
        # validation error when no search terms are entered
        self.assertContains(response, 'enter search terms for at least one of')

        # keyword search
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election'})
        # basic tests for template display of search result
        self.assertContains(response, 'Found <strong>1</strong> resource',
            msg_prefix='response should indicate number of matches found')
        self.assertContains(response, self.cb.title,
            msg_prefix='response should include title for matching document')
        self.assertContains(response, self.cb.abstract[:50],
            msg_prefix='response should include beginning of abstract for matching document')
        self.assertContains(response, 'Principal Investigators:',
            msg_prefix='response should include principal investigators section when present')
        self.assertContains(response, self.cb.authors[0],
            msg_prefix='response should include principal investigator when present')
        self.assertContains(response, self.cb.keywords[0],
            msg_prefix='response should include keywords when present (first keyword)')
        self.assertContains(response, self.cb.keywords[1],
            msg_prefix='response should include keywords when present (second keyword)')
        self.assertContains(response, '; '.join(self.cb.dates),
            msg_prefix='response should include document dates')

        # title search
        # - matches fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'title': 'israeli election 1973'})
        self.assertEqual(1, response.context['results'].paginator.count)
        # - does not match fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'title': 'horse'})
        self.assertEqual(0, response.context['results'].paginator.count)

        # abstract search
        # - matches fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'summary': 'voting patterns Arab-Israeli conflict'})
        self.assertEqual(1, response.context['results'].paginator.count)
        # - does not match fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'summary': 'horse'})
        self.assertEqual(0, response.context['results'].paginator.count)

        # source search
        # - matches fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'source': 'asher'})
        self.assertEqual(1, response.context['results'].paginator.count)
        # - does not match fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'source': 'horse'})
        self.assertEqual(0, response.context['results'].paginator.count)

        # location search
        # - matches fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'location': 'israel'})
        self.assertEqual(1, response.context['results'].paginator.count,
            'expected 1 result for search on location:israel')
        # - does not match fixture
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'location': 'brazil'})
        self.assertEqual(0, response.context['results'].paginator.count,
            'expected no results for search on location:brazil')

        # combined fields - match
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'title': 'israeli',
             'summary': 'voting', 'source': 'asher'})
        self.assertEqual(1, response.context['results'].paginator.count)
        # - no match
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'title': 'israeli',
             'summary': 'voting', 'source': 'horse'})
        self.assertEqual(0, response.context['results'].paginator.count)

    def test_search_by_date(self):
        """Start/end date filters restrict results to matching time periods."""
        search_url = reverse('ddi:search')
        # single date - partial date match in the fixture (1974-01-15)
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'start_date': 1974, 'end_date': 1974})
        self.assertEqual(1, response.context['results'].paginator.count,
            'expected one result from date search on 1974')
        # single date with no keyword should also be allowed
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance',
             'start_date': 1974, 'end_date': 1974})
        self.assertEqual(1, response.context['results'].paginator.count,
            'expected one result from date search on 1974 without keyword')
        # single date that does not match
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'start_date': 1975, 'end_date': 1975})
        self.assertEqual(0, response.context['results'].paginator.count,
            'expected no results from date search on 1975')

        # after date
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'start_date': 1960})
        self.assertEqual(1, response.context['results'].paginator.count,
            'expected one result from date search on items after 1960')
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'start_date': 1980})
        self.assertEqual(0, response.context['results'].paginator.count,
            'expected no results from date search on items after 1980')

        # before date
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'end_date': 1974})
        self.assertEqual(1, response.context['results'].paginator.count,
            'expected one result from date search on items before 1974')
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'end_date': 1972})
        self.assertEqual(0, response.context['results'].paginator.count,
            'expected no results from date search on items before 1972')

        # date range
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'start_date': 1960, 'end_date': 2000})
        self.assertEqual(1, response.context['results'].paginator.count,
            'expected one result from date search on items from 1960-2000')
        response = self.client.get(search_url,
            {'per_page': 10, 'sort': 'relevance', 'keyword': 'israeli election',
             'start_date': 1960, 'end_date': 1972})
        self.assertEqual(0, response.context['results'].paginator.count,
            'expected no results from date search on items from 1960-1972')

    def test_resource(self):
        """Single-resource page displays full record details; bad ids 404."""
        # single document display
        # consistency fix: use id.val (the string id) as elsewhere in this
        # test case, rather than the IDNumber object
        resource_url = reverse('ddi:resource',
            kwargs={'id': self.cb.id.val, 'agency': self.cb.id.agency})
        response = self.client.get(resource_url)
        self.assertContains(response, '<span class="resource-title">%s</span>' \
            % self.cb.title, html=True)
        for author in self.cb.authors:
            self.assertContains(response, author)
        # check subset of keyword terms
        for i in range(10):
            self.assertContains(response, self.cb.keywords[i])
        # just test for part of the abstract (full thing too long for comparison)
        self.assertContains(response, '<p>%s' % self.cb.abstract[:150])
        self.assertContains(response, '%s</p>' % self.cb.abstract[-150:])

        # bogus id should 404
        resource_url = reverse('ddi:resource',
            kwargs={'id': '12345678', 'agency': self.cb.id.agency})
        response = self.client.get(resource_url)
        expected, got = 404, response.status_code
        self.assertEqual(expected, got,
            'expected status code %s for %s with bogus id, got %s' %
            (expected, resource_url, got))

    def test_topics(self):
        """Browse-by-topic page lists local topics with counts and links."""
        # NOTE: the topics xqueries reference the collection directly;
        # update the xpaths so it runs against the test database
        ddixml.DistinctTopics.objects.xpath = ddixml.DistinctTopics. \
            objects.xpath.replace(settings.EXISTDB_ROOT_COLLECTION_REAL,
                                  settings.EXISTDB_ROOT_COLLECTION)
        ddixml.DistinctTopics.count_xpath = ddixml.DistinctTopics. \
            count_xpath.replace(settings.EXISTDB_ROOT_COLLECTION_REAL,
                                settings.EXISTDB_ROOT_COLLECTION)

        topic_url = reverse('ddi:browse-terms', kwargs={'mode': 'topics'})
        response = self.client.get(topic_url)
        self.assertContains(response, 'Topics')
        # fixture has no local topic
        self.assertContains(response, 'No results')

        # update the fixture with a local topic for testing
        topic = 'International Relations'
        self.cb.topics.append(ddixml.Topic(val=topic, vocab='local'))
        dbpath = settings.EXISTDB_ROOT_COLLECTION + "/" + self.fixture_filename
        db = ExistDB()
        db.load(self.cb.serialize(), dbpath, overwrite=True)

        response = self.client.get(topic_url)
        self.assertContains(response, topic,
            msg_prefix='Local topic should be listed on browse topics page')
        self.assertContains(response, '<span class="badge">1</span>',
            msg_prefix='topic count should be displayed')
        # NOTE: should be using html=True here, but page isn't currently valid
        # xml (incorrect nesting divs? unclosed favicon in eultheme)

        # list resources by topic
        response = self.client.get(topic_url, {'topic': topic})
        self.assertContains(response, 'Topics : %s' % topic,
            msg_prefix='Single topic page should display topic')
        self.assertContains(response, 'Found <strong>1</strong> resource',
            msg_prefix='Single topic page should display resource count')
        self.assertContains(response, self.cb.title,
            msg_prefix='Single topic page should display matching item title')
        # consistency fix: use id.val as in the other reverse() calls above
        self.assertContains(response, reverse('ddi:resource',
            kwargs={'id': self.cb.id.val, 'agency': self.cb.id.agency}),
            msg_prefix='single topic page should link to matching item')
@patch('ddisearch.ddi.management.commands.load.CodebookGeocoder')
class LoadCommandTest(TestCase):
# NOTE: patching CodebookGeocoder to avoid calling GeoNames geocode
# service, and ignoring geocode functionality here because that logic
# is tested in ddisearch.geo.tests
testfile = os.path.join(FIXTURE_DIR, '02988.xml')
def setUp(self):
    # Fresh load command with captured stdout so tests can assert on output.
    self.cmd = load.Command()
    self.cmd.stdout = StringIO()
    self.db = ExistDB()
    # paths of documents loaded into eXist during a test; removed in tearDown
    self._exist_content = []
    # parsed fixture codebook, used as input for topic/date helpers
    self.cb = load_xmlobject_from_file(os.path.join(FIXTURE_DIR, '02988.xml'),
                                       ddixml.CodeBook)
def tearDown(self):
    """Remove every document this test loaded into the eXist database."""
    for doc_path in self._exist_content:
        self.db.removeDocument(doc_path)
def test_errors(self, mockcbgeocode):
    """Misconfiguration raises CommandError; a bad file path is only reported."""
    # config error: empty root collection aborts the command
    with override_settings(EXISTDB_ROOT_COLLECTION=''):
        self.assertRaises(CommandError, self.cmd.handle)
    # invalid file error: reported on stdout rather than raised
    self.cmd.handle('/tmp/notarealfile.xml')
    self.assert_('Error opening' in self.cmd.stdout.getvalue())
def test_load(self, mockcbgeocode):
    """A loaded file ends up in eXist and the local copy is deleted."""
    tmp = tempfile.NamedTemporaryFile(suffix='.xml', delete=False)
    # delete false to avoid error, since the script will remove
    shutil.copyfile(self.testfile, tmp.name)
    self.cmd.handle(tmp.name)
    # check that document was loaded to exist
    exist_path = '%s/%s' % (settings.EXISTDB_ROOT_COLLECTION,
                            os.path.basename(tmp.name))
    desc = self.db.describeDocument(exist_path)
    self.assert_(desc,
        'document description should be non-empty for loaded content')
    self._exist_content.append(exist_path)  # queue for removal in cleanup
    mod = desc['modified']
    # eXist reports a datetime; compare date portion to today
    self.assertEqual(datetime.date(mod.year, mod.month, mod.day),
                     datetime.date.today(),
                     'loaded document should show modification date of today')
    # check file was removed
    self.assertFalse(os.path.exists(tmp.name),
        'local copy of file should be deleted after loaded to eXist')
def test_load_remove_error(self, mockcbgeocode):
    """A failure to delete the local file is reported, not raised."""
    # simulate error removing local copy of file
    tmp = tempfile.NamedTemporaryFile(suffix='.xml')
    shutil.copyfile(self.testfile, tmp.name)
    # simulate deletion error by patching os.remove as used by the command
    with patch('ddisearch.ddi.management.commands.load.os.remove') as mockremove:
        mockremove.side_effect = OSError('Permission Denied')
        self.cmd.handle(tmp.name)
        cmd_output = self.cmd.stdout.getvalue()
        self.assert_('Error removing ' in cmd_output,
            'script should report when there is an error removing local file')
        self.assert_('Permission Denied' in cmd_output)
    # queue for removal in cleanup (document was still loaded to eXist)
    exist_path = '%s/%s' % (settings.EXISTDB_ROOT_COLLECTION,
                            os.path.basename(tmp.name))
    self._exist_content.append(exist_path)
    def test_icpsr_topic_id(self, mockcbgeocode):
        # icpsr topic id is extracted correctly from an ICPSR topic string
        self.assertEqual('ICPSR.XIV.A.2.b', self.cmd.icpsr_topic_id(self.cb.topics[0].val))
        # non-icpsr topic yields None
        self.assertEqual(None, self.cmd.icpsr_topic_id(self.cb.topics[1].val))
    def test_local_topics(self, mockcbgeocode):
        """local_topics maps ICPSR topics onto the local vocabulary in place."""
        topic_count = len(self.cb.topics)
        self.cmd.local_topics(self.cb)
        self.assertEqual(topic_count + 1, len(self.cb.topics),
            'one new local topic should be added to test record')
        self.assertEqual('Elections and Electoral Politics', self.cb.topics[-1].val)
        # local vocabulary name subject to change
        self.assertEqual('local', self.cb.topics[-1].vocab)
        # simulate topic with conditional global topic: ICPSR IV.a maps to a
        # second topic only when the record has global geographic coverage
        topic_count = len(self.cb.topics)
        self.cb.topics[0].val = 'ICPSR IV.a'
        self.cmd.local_topics(self.cb)
        self.assertEqual(topic_count + 2, len(self.cb.topics),
            'two new local topics should be added to test record with global coverage')
        self.assertEqual('Economic and Financial', self.cb.topics[-2].val)
        self.assertEqual('International Political Economy', self.cb.topics[-1].val)
        self.assertEqual('local', self.cb.topics[-1].vocab)
        # simulate topic with conditional global topic *without* global geog coverage
        topic_count = len(self.cb.topics)
        self.cb.topics[0].val = 'ICPSR IV.a'
        self.cb.geo_coverage[-1].val = 'not global'
        self.cmd.local_topics(self.cb)
        self.assertEqual(topic_count + 1, len(self.cb.topics),
            'only one new local topic should be added to test record without global coverage')
        self.assertEqual('Economic and Financial', self.cb.topics[-1].val)
    def test_clean_dates(self, mockcbgeocode):
        """clean_dates normalizes short years and month-only cycle end dates."""
        # fixture has no dates that need cleaning, so modify them
        # set first date as a year < 1000; should be converted to 4 digits
        self.cb.time_periods[0].date = '1'
        # second and third dates in the record form are start/end for one cycle;
        # simulate case where end date is month only
        self.cb.time_periods[1].date = '1974'
        self.cb.time_periods[2].date = '02'
        self.cmd.clean_dates(self.cb)
        self.assertEqual('0001', self.cb.time_periods[0].date,
            'dates before 1000 should be converted to 4 digits')
        self.assertEqual('1974-02', self.cb.time_periods[2].date,
            'two-digit dates ending a cycle should be converted to month-year')
shtouff/django | tests/template_tests/filter_tests/test_rjust.py | 521 | 1030 | from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class RjustTests(SimpleTestCase):
    """Template-engine tests for the ``rjust`` filter under autoescaping."""

    @setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
    def test_rjust01(self):
        # With autoescape off, neither the plain nor the safe string is escaped.
        output = self.engine.render_to_string('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ". a&b. . a&b.")

    @setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
    def test_rjust02(self):
        # With autoescape on, the unsafe value must be HTML-escaped while the
        # mark_safe value passes through unchanged.  (The first expected value
        # is "a&amp;b"; it had been corrupted to "a&b" by entity decoding.)
        output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ". a&amp;b. . a&b.")
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the ``rjust`` filter function."""

    def test_rjust(self):
        # Padding to a width longer than the input right-justifies it.
        self.assertEqual(rjust('test', 10), '      test')

    def test_less_than_string_length(self):
        # A width shorter than the input leaves the string untouched.
        self.assertEqual(rjust('test', 3), 'test')

    def test_non_string_input(self):
        # Non-string values are coerced to text before padding.
        self.assertEqual(rjust(123, 4), ' 123')
| bsd-3-clause |
jjlee3/openthread | tests/scripts/thread-cert/Cert_6_5_02_ChildResetReattach.py | 5 | 3332 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
# Node indices used as keys into self.nodes for the two-node topology.
LEADER = 1
ED = 2
class Cert_6_5_2_ChildResetReattach(unittest.TestCase):
    """Thread certification test 6.5.2: a child (end device) that loses and
    then regains connectivity to its parent must reattach successfully and
    remain reachable via its link-local address."""

    def setUp(self):
        # Build a two-node topology: a leader and an end device (ED),
        # whitelisted so they can only hear each other.
        self.nodes = {}
        for i in range(1, 3):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[ED].set_panid(0xface)
        self.nodes[ED].set_mode('rsn')
        self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ED].enable_whitelist()

    def tearDown(self):
        # Loop variable renamed from `node` to `n`: the original shadowed
        # the imported `node` module inside this method.
        for n in list(self.nodes.values()):
            n.stop()
        del self.nodes

    def test(self):
        # Bring up the leader first, then the child.
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[ED].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ED].get_state(), 'child')

        # Sever connectivity in both directions, reset the child, then
        # restore connectivity and verify the child reattaches.
        self.nodes[LEADER].remove_whitelist(self.nodes[ED].get_addr64())
        self.nodes[ED].remove_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ED].stop()
        time.sleep(5)
        self.nodes[ED].start()
        time.sleep(5)
        self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
        self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
        time.sleep(5)
        self.assertEqual(self.nodes[ED].get_state(), 'child')

        # The reattached child must answer pings on its link-local address.
        addrs = self.nodes[ED].get_addrs()
        for addr in addrs:
            if addr[0:4] == 'fe80':
                self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
    # Allow running this cert test directly from the command line.
    unittest.main()
| bsd-3-clause |
txenoo/django-radio | tasks/utils.py | 1 | 2468 | import tempfile
from contextlib import contextmanager
from invoke import UnexpectedExit
import os
# Repository root (two directory levels above this file) and the invoking
# user's home directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
HOME_DIR = os.path.expanduser('~')
def run_ignoring_failure(method, command):
    """Invoke ``method(command)``, swallowing invoke's UnexpectedExit.

    Used for cleanup-style shell commands (e.g. ``git commit`` with nothing
    to commit) where a non-zero exit status is expected and harmless.
    """
    try:
        method(command)
    except UnexpectedExit:
        pass
@contextmanager
def chdir(dirname=None):
    """Temporarily change the process working directory to *dirname*.

    The original directory is restored even if the managed block raises;
    the previous implementation skipped the restore on error, leaking the
    cwd change to subsequent code.

    Not safe when running concurrent tasks: the process-wide cwd is mutated.
    """
    current_dir = os.getcwd()
    os.chdir(dirname)
    try:
        yield
    finally:
        # Always restore, even on exception inside the managed block.
        os.chdir(current_dir)
@contextmanager
def use_tmp_dir(ctx):
    """Copy the repository into a fresh temporary directory and chdir there.

    Yields the temporary path.  The original working directory is restored
    even if the managed block raises (the previous implementation skipped
    the restore on error).

    Not safe when running concurrent tasks: the process-wide cwd is mutated.
    """
    tmp_path = tempfile.mkdtemp()
    ctx.run('cp -R {repo_path} {tmp_path}'.format(
        repo_path=os.path.join(BASE_DIR, '.'),
        tmp_path=tmp_path)
    )
    current_dir = os.getcwd()
    os.chdir(tmp_path)
    try:
        yield tmp_path
    finally:
        os.chdir(current_dir)
def commit_settings(ctx, message, dir_name=None, environment='base'):
    """Force-add and commit the generated local_settings files.

    Git failures (e.g. nothing to commit) are swallowed so repeated runs
    are idempotent.  Runs inside *dir_name* (defaults to the repo root).
    """
    with chdir(dir_name or BASE_DIR):
        run_ignoring_failure(ctx.run, 'git add -f radioco/configs/base/local_settings.py')
        if environment != 'base':
            # Non-base environments keep their own local_settings copy.
            run_ignoring_failure(ctx.run, 'git add -f radioco/configs/{}/local_settings.py'.format(environment))
        run_ignoring_failure(ctx.run, 'git commit -am "{}"'.format(message))
def get_current_branch(ctx):
    """Return the name of the currently checked-out git branch."""
    result = ctx.run('git rev-parse --abbrev-ref HEAD')
    return result.stdout.strip()
# @contextmanager
# def change_branch(ctx, branch=None):
# """
# Change to other branch temporally if a branch is provided
# """
# current_branch = ctx.run('git rev-parse --abbrev-ref HEAD').stdout
# if branch:
# ctx.run('git checkout {}'.format(branch))
# yield
# ctx.run('git checkout {}'.format(current_branch))
def _read_requirements_file(filename, parent=None):
parent = (parent or __file__)
try:
with open(os.path.join(os.path.dirname(parent), filename)) as f:
return f.read()
except IOError:
return ''
def parse_requirements(filename, parent=None):
    """Yield requirement lines from *filename*, following ``-r`` includes.

    Each line is stripped of surrounding whitespace; ``-r other.txt`` lines
    are expanded recursively, resolved relative to the including file.
    Blank lines are yielded as empty strings, matching the original
    behaviour (callers are expected to filter).

    The unused ``enumerate`` line counter from the original implementation
    has been removed.
    """
    parent = (parent or __file__)
    filepath = os.path.join(os.path.dirname(parent), filename)
    content = _read_requirements_file(filename, parent)
    for line in content.splitlines():
        candidate = line.strip()
        if candidate.startswith('-r'):
            # Recurse into the referenced requirements file, anchored at
            # the current file's location so nested includes resolve.
            for item in parse_requirements(candidate[2:].strip(), filepath):
                yield item
        else:
            yield candidate
haowu4682/gem5 | configs/ruby/MI_example.py | 6 | 6628 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the cache latency is only used by the sequencer on fast path hits
#
class Cache(RubyCache):
    # Fast-path hit latency in cycles, consumed by the Ruby sequencer
    # (see note above); not used by the protocol state machine itself.
    latency = 3
def define_options(parser):
    # MI_example adds no protocol-specific command line options.
    return
def create_system(options, system, dma_ports, ruby_system):
    """Build the MI_example Ruby protocol: one L1 controller+sequencer per
    CPU, one directory controller per memory bank, and one DMA controller
    per DMA port, then assemble them into a network topology.

    Returns (cpu_sequencers, dir_cntrl_nodes, topology).  Panics unless
    gem5 was built with PROTOCOL=MI_example.
    """
    if buildEnv['PROTOCOL'] != 'MI_example':
        panic("This script requires the MI_example protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        # Only one cache exists for this protocol, so by default use the L1D
        # config parameters.
        #
        cache = Cache(size = options.l1d_size,
                      assoc = options.l1d_assoc,
                      start_index_bit = block_size_bits)

        #
        # Only one unified L1 cache exists. Can cache instructions and data.
        #
        l1_cntrl = L1Cache_Controller(version = i,
                                      cacheMemory = cache,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = cache,
                                dcache = cache,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system.
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #
        mem_cntrl = RubyMemoryControl(
            clk_domain = ruby_system.memctrl_clk_domain,
            version = i,
            ruby_system = ruby_system)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory( \
                                             version = i,
                                             size = dir_size,
                                             use_map = options.use_map,
                                             map_levels = \
                                             options.map_levels),
                                         memBuffer = mem_cntrl,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

    # Ordering matters here: L1s, then directories, then DMA (see note above).
    all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
    topology = create_topology(all_cntrls, options)

    return (cpu_sequencers, dir_cntrl_nodes, topology)
| bsd-3-clause |
MrLoick/python-for-android | python-build/python-libs/gdata/samples/oauth/2_legged_oauth.py | 128 | 2463 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import gdata.contacts
import gdata.contacts.service
import gdata.docs
import gdata.docs.service
# Demo of 2-legged OAuth against the Google Contacts and Documents APIs:
# the domain-wide consumer key/secret lets this script act on behalf of any
# user in the domain (requestor_id) without per-user access tokens.
CONSUMER_KEY = 'yourdomain.com'
CONSUMER_SECRET = 'YOUR_CONSUMER_KEY'
SIG_METHOD = gdata.auth.OAuthSignatureMethod.HMAC_SHA1

requestor_id = 'any.user@yourdomain.com'

# Contacts Data API ============================================================
contacts = gdata.contacts.service.ContactsService()
contacts.SetOAuthInputParameters(
    SIG_METHOD, CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
    two_legged_oauth=True, requestor_id=requestor_id)

# GET - fetch user's contact list
print "\nList of contacts for %s:" % (requestor_id,)
feed = contacts.GetContactsFeed()
for entry in feed.entry:
  print entry.title.text

# GET - fetch another user's contact list
# NOTE(review): switching users only requires updating requestor_id on the
# existing OAuth input parameters -- no new token exchange.
requestor_id = 'another_user@yourdomain.com'
print "\nList of contacts for %s:" % (requestor_id,)
contacts.GetOAuthInputParameters().requestor_id = requestor_id
feed = contacts.GetContactsFeed()
for entry in feed.entry:
  print entry.title.text

# Google Documents List Data API ===============================================
docs = gdata.docs.service.DocsService()
docs.SetOAuthInputParameters(
    SIG_METHOD, CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
    two_legged_oauth=True, requestor_id=requestor_id)

# POST - upload a document
print "\nUploading document to %s's Google Documents account:" % (requestor_id,)
ms = gdata.MediaSource(
    file_path='/path/to/test.txt',
    content_type=gdata.docs.service.SUPPORTED_FILETYPES['TXT'])

# GET - fetch user's document list
entry = docs.UploadDocument(ms, 'Company Perks')
print 'Document now accessible online at:', entry.GetAlternateLink().href

print "\nList of Google Documents for %s" % (requestor_id,)
feed = docs.GetDocumentListFeed()
for entry in feed.entry:
  print entry.title.text
| apache-2.0 |
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/idlelib/EditorWindow.py | 47 | 58813 | import sys
import os
import re
import imp
from itertools import count
from Tkinter import *
import tkSimpleDialog
import tkMessageBox
from MultiCall import MultiCallCreator
import webbrowser
import idlever
import WindowList
import SearchDialog
import GrepDialog
import ReplaceDialog
import PyParse
from configHandler import idleConf
import aboutDialog, textView, configDialog
import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
def _find_module(fullname, path=None):
    """Version of imp.find_module() that handles hierarchical module names.

    Walks each dotted component, loading intermediate packages so the next
    component can be located on their __path__.  Returns the
    (file, filename, descr) triple for the final component; for PY_SOURCE
    modules the file is left open and must be closed by the caller.
    Raises ImportError when an intermediate module has no __path__
    (i.e. it is not a package).
    """
    file = None
    for tgt in fullname.split('.'):
        if file is not None:
            file.close()            # close intermediate files
        (file, filename, descr) = imp.find_module(tgt, path)
        if descr[2] == imp.PY_SOURCE:
            break       # find but not load the source file
        module = imp.load_module(tgt, file, filename, descr)
        try:
            path = module.__path__
        except AttributeError:
            raise ImportError, 'No source for module ' + module.__name__
    return file, filename, descr
class EditorWindow(object):
from Percolator import Percolator
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from IOBinding import IOBinding, filesystemencoding, encoding
import Bindings
from Tkinter import Toplevel
from MultiStatusBar import MultiStatusBar
help_url = None
    def __init__(self, flist=None, filename=None, key=None, root=None):
        """Create one editor window.

        Builds the Tk widgets, menus and key bindings, the status bar, the
        percolator/undo stack and the I/O binding, then loads *filename*
        if one was given.  *flist* is the shared FileList (None when run
        as a subprocess or under test); *key* registers this window in it.
        """
        # Resolve the help URL once per process, preferring local docs.
        if EditorWindow.help_url is None:
            dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
            if sys.platform.count('linux'):
                # look for html docs in a couple of standard places
                pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
                if os.path.isdir('/var/www/html/python/'):  # "python2" rpm
                    dochome = '/var/www/html/python/index.html'
                else:
                    basepath = '/usr/share/doc/'  # standard location
                    dochome = os.path.join(basepath, pyver,
                                           'Doc', 'index.html')
            elif sys.platform[:3] == 'win':
                chmfile = os.path.join(sys.prefix, 'Doc',
                                       'Python%d%d.chm' % sys.version_info[:2])
                if os.path.isfile(chmfile):
                    dochome = chmfile
            elif macosxSupport.runningAsOSXApp():
                # documentation is stored inside the python framework
                dochome = os.path.join(sys.prefix,
                        'Resources/English.lproj/Documentation/index.html')
            dochome = os.path.normpath(dochome)
            if os.path.isfile(dochome):
                EditorWindow.help_url = dochome
                if sys.platform == 'darwin':
                    # Safari requires real file:-URLs
                    EditorWindow.help_url = 'file://' + EditorWindow.help_url
            else:
                EditorWindow.help_url = "http://www.python.org/doc/current"
        currentTheme=idleConf.CurrentTheme()
        self.flist = flist
        root = root or flist.root
        self.root = root
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = '>>> '
        self.menubar = Menu(root)
        self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
        if flist:
            self.tkinter_vars = flist.vars
            #self.top.instance_dict makes flist.inversedict avalable to
            #configDialog.py so it can access all EditorWindow instaces
            self.top.instance_dict = flist.inversedict
        else:
            self.tkinter_vars = {}  # keys: Tkinter event names
            # values: Tkinter variable instances
            self.top.instance_dict = {}
        self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
                'recent-files.lst')
        self.text_frame = text_frame = Frame(top)
        self.vbar = vbar = Scrollbar(text_frame, name='vbar')
        self.width = idleConf.GetOption('main','EditorWindow','width')
        self.text = text = MultiCallCreator(Text)(
                text_frame, name='text', padx=5, wrap='none',
                width=self.width,
                height=idleConf.GetOption('main','EditorWindow','height') )
        self.top.focused_widget = self.text

        self.createmenubar()
        self.apply_bindings()

        self.top.protocol("WM_DELETE_WINDOW", self.close)
        self.top.bind("<<close-window>>", self.close_event)
        if macosxSupport.runningAsOSXApp():
            # Command-W on editorwindows doesn't work without this.
            text.bind('<<close-window>>', self.close_event)
        # Wire every editor virtual event to its handler method.
        text.bind("<<cut>>", self.cut)
        text.bind("<<copy>>", self.copy)
        text.bind("<<paste>>", self.paste)
        text.bind("<<center-insert>>", self.center_insert_event)
        text.bind("<<help>>", self.help_dialog)
        text.bind("<<python-docs>>", self.python_docs)
        text.bind("<<about-idle>>", self.about_dialog)
        text.bind("<<open-config-dialog>>", self.config_dialog)
        text.bind("<<open-module>>", self.open_module)
        text.bind("<<do-nothing>>", lambda event: "break")
        text.bind("<<select-all>>", self.select_all)
        text.bind("<<remove-selection>>", self.remove_selection)
        text.bind("<<find>>", self.find_event)
        text.bind("<<find-again>>", self.find_again_event)
        text.bind("<<find-in-files>>", self.find_in_files_event)
        text.bind("<<find-selection>>", self.find_selection_event)
        text.bind("<<replace>>", self.replace_event)
        text.bind("<<goto-line>>", self.goto_line_event)
        text.bind("<3>", self.right_menu_event)
        text.bind("<<smart-backspace>>",self.smart_backspace_event)
        text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
        text.bind("<<smart-indent>>",self.smart_indent_event)
        text.bind("<<indent-region>>",self.indent_region_event)
        text.bind("<<dedent-region>>",self.dedent_region_event)
        text.bind("<<comment-region>>",self.comment_region_event)
        text.bind("<<uncomment-region>>",self.uncomment_region_event)
        text.bind("<<tabify-region>>",self.tabify_region_event)
        text.bind("<<untabify-region>>",self.untabify_region_event)
        text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
        text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
        text.bind("<Left>", self.move_at_edge_if_selection(0))
        text.bind("<Right>", self.move_at_edge_if_selection(1))
        text.bind("<<del-word-left>>", self.del_word_left)
        text.bind("<<del-word-right>>", self.del_word_right)
        text.bind("<<beginning-of-line>>", self.home_callback)

        if flist:
            flist.inversedict[self] = key
            if key:
                flist.dict[key] = self
            text.bind("<<open-new-window>>", self.new_callback)
            text.bind("<<close-all-windows>>", self.flist.close_all_callback)
            text.bind("<<open-class-browser>>", self.open_class_browser)
            text.bind("<<open-path-browser>>", self.open_path_browser)

        self.set_status_bar()
        vbar['command'] = text.yview
        vbar.pack(side=RIGHT, fill=Y)
        text['yscrollcommand'] = vbar.set
        fontWeight = 'normal'
        if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
            fontWeight='bold'
        text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
                          idleConf.GetOption('main', 'EditorWindow', 'font-size'),
                          fontWeight))
        text_frame.pack(side=LEFT, fill=BOTH, expand=1)
        text.pack(side=TOP, fill=BOTH, expand=1)
        text.focus_set()

        # usetabs true  -> literal tab characters are used by indent and
        #                  dedent cmds, possibly mixed with spaces if
        #                  indentwidth is not a multiple of tabwidth,
        #                  which will cause Tabnanny to nag!
        #         false -> tab characters are converted to spaces by indent
        #                  and dedent cmds, and ditto TAB keystrokes
        # Although use-spaces=0 can be configured manually in config-main.def,
        # configuration of tabs v. spaces is not supported in the configuration
        # dialog.  IDLE promotes the preferred Python indentation: use spaces!
        usespaces = idleConf.GetOption('main', 'Indent', 'use-spaces', type='bool')
        self.usetabs = not usespaces

        # tabwidth is the display width of a literal tab character.
        # CAUTION:  telling Tk to use anything other than its default
        # tab setting causes it to use an entirely different tabbing algorithm,
        # treating tab stops as fixed distances from the left margin.
        # Nobody expects this, so for now tabwidth should never be changed.
        self.tabwidth = 8    # must remain 8 until Tk is fixed.

        # indentwidth is the number of screen characters per indent level.
        # The recommended Python indentation is four spaces.
        self.indentwidth = self.tabwidth
        self.set_notabs_indentwidth()

        # If context_use_ps1 is true, parsing searches back for a ps1 line;
        # else searches for a popular (if, def, ...) Python stmt.
        self.context_use_ps1 = False

        # When searching backwards for a reliable place to begin parsing,
        # first start num_context_lines[0] lines back, then
        # num_context_lines[1] lines back if that didn't work, and so on.
        # The last value should be huge (larger than the # of lines in a
        # conceivable file).
        # Making the initial values larger slows things down more often.
        self.num_context_lines = 50, 500, 5000000

        self.per = per = self.Percolator(text)

        self.undo = undo = self.UndoDelegator()
        per.insertfilter(undo)
        text.undo_block_start = undo.undo_block_start
        text.undo_block_stop = undo.undo_block_stop
        undo.set_saved_change_hook(self.saved_change_hook)

        # IOBinding implements file I/O and printing functionality
        self.io = io = self.IOBinding(self)
        io.set_filename_change_hook(self.filename_change_hook)

        # Create the recent files submenu
        self.recent_files_menu = Menu(self.menubar)
        self.menudict['file'].insert_cascade(3, label='Recent Files',
                                             underline=0,
                                             menu=self.recent_files_menu)
        self.update_recent_files_list()

        self.color = None # initialized below in self.ResetColorizer
        if filename:
            if os.path.exists(filename) and not os.path.isdir(filename):
                io.loadfile(filename)
            else:
                io.set_filename(filename)
        self.ResetColorizer()
        self.saved_change_hook()

        self.set_indentation_params(self.ispythonsource(filename))

        self.load_extensions()

        menu = self.menudict.get('windows')
        if menu:
            end = menu.index("end")
            if end is None:
                end = -1

            if end >= 0:
                menu.add_separator()
                end = end + 1
            self.wmenu_end = end
            WindowList.register_callback(self.postwindowsmenu)

        # Some abstractions so IDLE extensions are cross-IDE
        self.askyesno = tkMessageBox.askyesno
        self.askinteger = tkSimpleDialog.askinteger
        self.showerror = tkMessageBox.showerror
    def _filename_to_unicode(self, filename):
        """convert filename to unicode in order to display it in Tk"""
        if isinstance(filename, unicode) or not filename:
            return filename
        else:
            # Decode with the filesystem encoding first, then the configured
            # IDLE encoding, and finally latin-1, which cannot fail (it maps
            # every byte to the code point of the same value).
            try:
                return filename.decode(self.filesystemencoding)
            except UnicodeDecodeError:
                # XXX
                try:
                    return filename.decode(self.encoding)
                except UnicodeDecodeError:
                    # byte-to-byte conversion
                    return filename.decode('iso8859-1')
    def new_callback(self, event):
        # Open a fresh, empty editor window in the current file's directory.
        dirname, basename = self.io.defaultfilename()
        self.flist.new(dirname)
        return "break"
    def home_callback(self, event):
        """Smart Home: jump between column 0, first non-blank character,
        and (in a shell window) the end of the prompt; Shift extends the
        selection instead of clearing it."""
        if (event.state & 12) != 0 and event.keysym == "Home":
            # state&1==shift, state&4==control, state&8==alt
            return # <Modifier-Home>; fall back to class binding
        if self.text.index("iomark") and \
           self.text.compare("iomark", "<=", "insert lineend") and \
           self.text.compare("insert linestart", "<=", "iomark"):
            # In a shell window with the cursor on the input line: target
            # column is just after the prompt (the iomark).
            insertpt = int(self.text.index("iomark").split(".")[1])
        else:
            # Otherwise target the first non-whitespace character.
            line = self.text.get("insert linestart", "insert lineend")
            for insertpt in xrange(len(line)):
                if line[insertpt] not in (' ','\t'):
                    break
            else:
                insertpt=len(line)
        lineat = int(self.text.index("insert").split('.')[1])
        if insertpt == lineat:
            # Already at the smart-home column; toggle to column 0.
            insertpt = 0
        dest = "insert linestart+"+str(insertpt)+"c"
        if (event.state&1) == 0:
            # shift not pressed
            self.text.tag_remove("sel", "1.0", "end")
        else:
            # Shift pressed: extend (or start) the selection to the target.
            if not self.text.index("sel.first"):
                self.text.mark_set("anchor","insert")
            first = self.text.index(dest)
            last = self.text.index("anchor")
            if self.text.compare(first,">",last):
                first,last = last,first
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", first, last)
        self.text.mark_set("insert", dest)
        self.text.see("insert")
        return "break"
    def set_status_bar(self):
        """Create the bottom status bar showing the cursor line/column."""
        self.status_bar = self.MultiStatusBar(self.top)
        if macosxSupport.runningAsOSXApp():
            # Insert some padding to avoid obscuring some of the statusbar
            # by the resize widget.
            self.status_bar.set_label('_padding1', '    ', side=RIGHT)
        self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
        self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
        self.status_bar.pack(side=BOTTOM, fill=X)
        # Refresh the position display on every key or mouse-button release.
        self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
        self.text.event_add("<<set-line-and-column>>",
                            "<KeyRelease>", "<ButtonRelease>")
        self.text.after_idle(self.set_line_and_column)
    def set_line_and_column(self, event=None):
        # Update the status-bar labels from the insert cursor's "line.col".
        line, column = self.text.index(INSERT).split('.')
        self.status_bar.set_label('column', 'Col: %s' % column)
        self.status_bar.set_label('line', 'Ln: %s' % line)
    # (internal menu name, display label; '_' marks the underlined mnemonic)
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("format", "F_ormat"),
        ("run", "_Run"),
        ("options", "_Options"),
        ("windows", "_Windows"),
        ("help", "_Help"),
    ]

    if macosxSupport.runningAsOSXApp():
        # On OS X drop the Options menu and rename Windows to the singular
        # "Window" to match platform conventions.
        del menu_specs[-3]
        menu_specs[-2] = ("windows", "_Window")
    def createmenubar(self):
        """Build the window's menu bar from menu_specs and fill each menu."""
        mbar = self.menubar
        self.menudict = menudict = {}
        for name, label in self.menu_specs:
            underline, label = prepstr(label)
            menudict[name] = menu = Menu(mbar, name=name)
            mbar.add_cascade(label=label, menu=menu, underline=underline)
        if macosxSupport.runningAsOSXApp():
            # Insert the application menu
            menudict['application'] = menu = Menu(mbar, name='apple')
            mbar.add_cascade(label='IDLE', menu=menu)
        self.fill_menus()
        # Remember how long the stock Help menu is so extra entries added
        # later can be reset (see reset_help_menu_entries).
        self.base_helpmenu_length = self.menudict['help'].index(END)
        self.reset_help_menu_entries()
    def postwindowsmenu(self):
        # Only called when Windows menu exists
        # Rebuild the per-window entries below the fixed section each time
        # the menu is posted, since the set of open windows changes.
        menu = self.menudict['windows']
        end = menu.index("end")
        if end is None:
            end = -1
        if end > self.wmenu_end:
            menu.delete(self.wmenu_end+1, end)
        WindowList.add_windows_to_menu(menu)
    # Lazily-created right-click context menu (see make_rmenu).
    rmenu = None

    def right_menu_event(self, event):
        """Post the context menu at the click position (right mouse button)."""
        self.text.tag_remove("sel", "1.0", "end")
        self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
        if not self.rmenu:
            self.make_rmenu()
        rmenu = self.rmenu
        self.event = event
        iswin = sys.platform[:3] == 'win'
        if iswin:
            # Work around a Windows Tk quirk: the text cursor shape must be
            # switched while the popup is posted.
            self.text.config(cursor="arrow")
        rmenu.tk_popup(event.x_root, event.y_root)
        if iswin:
            self.text.config(cursor="ibeam")
    rmenu_specs = [
        # ("Label", "<<virtual-event>>"), ...
        ("Close", "<<close-window>>"), # Example
    ]

    def make_rmenu(self):
        # Build the context menu once from rmenu_specs; each entry simply
        # fires the corresponding virtual event on the text widget.
        rmenu = Menu(self.text, tearoff=0)
        for label, eventname in self.rmenu_specs:
            def command(text=self.text, eventname=eventname):
                text.event_generate(eventname)
            rmenu.add_command(label=label, command=command)
        self.rmenu = rmenu
    def about_dialog(self, event=None):
        # Show the "About IDLE" dialog.
        aboutDialog.AboutDialog(self.top,'About IDLE')

    def config_dialog(self, event=None):
        # Open the IDLE preferences dialog.
        configDialog.ConfigDialog(self.top,'Settings')

    def help_dialog(self, event=None):
        # Display IDLE's bundled help.txt in a read-only viewer.
        fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
        textView.view_file(self.top,'Help',fn)

    def python_docs(self, event=None):
        # Open the Python documentation (local copy if found at startup,
        # otherwise the online docs) in the platform's preferred way.
        if sys.platform[:3] == 'win':
            os.startfile(self.help_url)
        else:
            webbrowser.open(self.help_url)
        return "break"
    def cut(self,event):
        # Delegate to Tk's built-in virtual clipboard event.
        self.text.event_generate("<<Cut>>")
        return "break"

    def copy(self,event):
        if not self.text.tag_ranges("sel"):
            # There is no selection, so do nothing and maybe interrupt.
            return
        self.text.event_generate("<<Copy>>")
        return "break"

    def paste(self,event):
        self.text.event_generate("<<Paste>>")
        # Keep the insertion point visible after pasting.
        self.text.see("insert")
        return "break"
    def select_all(self, event=None):
        # Select the whole buffer (excluding Tk's trailing newline) and
        # move the cursor to the top.
        self.text.tag_add("sel", "1.0", "end-1c")
        self.text.mark_set("insert", "1.0")
        self.text.see("insert")
        return "break"

    def remove_selection(self, event=None):
        # Clear any selection without moving the insertion cursor.
        self.text.tag_remove("sel", "1.0", "end")
        self.text.see("insert")
    def move_at_edge_if_selection(self, edge_index):
        """Cursor move begins at start or end of selection

        When a left/right cursor key is pressed create and return to Tkinter a
        function which causes a cursor move from the associated edge of the
        selection.

        edge_index is 0 for the left edge (Left key) and 1 for the right
        edge (Right key).
        """
        self_text_index = self.text.index
        self_text_mark_set = self.text.mark_set
        edges_table = ("sel.first+1c", "sel.last-1c")
        def move_at_edge(event):
            if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
                try:
                    # Raises TclError when there is no selection; then the
                    # default cursor-motion binding applies unchanged.
                    self_text_index("sel.first")
                    self_text_mark_set("insert", edges_table[edge_index])
                except TclError:
                    pass
        return move_at_edge
    def del_word_left(self, event):
        # Delete the word to the left of the cursor (Tk's Meta-Delete).
        self.text.event_generate('<Meta-Delete>')
        return "break"

    def del_word_right(self, event):
        # Delete the word to the right of the cursor (Tk's Meta-d).
        self.text.event_generate('<Meta-d>')
        return "break"
    def find_event(self, event):
        # Open the Find dialog.
        SearchDialog.find(self.text)
        return "break"

    def find_again_event(self, event):
        # Repeat the previous search without reopening the dialog.
        SearchDialog.find_again(self.text)
        return "break"

    def find_selection_event(self, event):
        # Search for the currently selected text.
        SearchDialog.find_selection(self.text)
        return "break"

    def find_in_files_event(self, event):
        # Open the grep-style Find in Files dialog.
        GrepDialog.grep(self.text, self.io, self.flist)
        return "break"

    def replace_event(self, event):
        # Open the Find/Replace dialog.
        ReplaceDialog.replace(self.text)
        return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
def open_module(self, event=None):
# XXX Shouldn't this be in IOBinding or in FileList?
try:
name = self.text.get("sel.first", "sel.last")
except TclError:
name = ""
else:
name = name.strip()
name = tkSimpleDialog.askstring("Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
parent=self.text, initialvalue=name)
if name:
name = name.strip()
if not name:
return
# XXX Ought to insert current file's directory in front of path
try:
(f, file, (suffix, mode, type)) = _find_module(name)
except (NameError, ImportError), msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if type != imp.PY_SOURCE:
tkMessageBox.showerror("Unsupported type",
"%s is not a source module" % name, parent=self.text)
return
if f:
f.close()
if self.flist:
self.flist.open(file)
else:
self.io.loadfile(file)
def open_class_browser(self, event=None):
filename = self.io.filename
if not filename:
tkMessageBox.showerror(
"No filename",
"This buffer has no associated filename",
master=self.text)
self.text.focus_set()
return None
head, tail = os.path.split(filename)
base, ext = os.path.splitext(tail)
import ClassBrowser
ClassBrowser.ClassBrowser(self.flist, base, [head])
def open_path_browser(self, event=None):
import PathBrowser
PathBrowser.PathBrowser(self.flist)
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
try:
f = open(filename)
line = f.readline()
f.close()
except IOError:
return False
return line.startswith('#!') and line.find('python') >= 0
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
def _addcolorizer(self):
if self.color:
return
if self.ispythonsource(self.io.filename):
self.color = self.ColorDelegator()
# can add more colorizers here...
if self.color:
self.per.removefilter(self.undo)
self.per.insertfilter(self.color)
self.per.insertfilter(self.undo)
def _rmcolorizer(self):
if not self.color:
return
self.color.removecolors()
self.per.removefilter(self.color)
self.color = None
def ResetColorizer(self):
"Update the colour theme"
# Called from self.filename_change_hook and from configDialog.py
self._rmcolorizer()
self._addcolorizer()
theme = idleConf.GetOption('main','Theme','name')
normal_colors = idleConf.GetHighlight(theme, 'normal')
cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
select_colors = idleConf.GetHighlight(theme, 'hilite')
self.text.config(
foreground=normal_colors['foreground'],
background=normal_colors['background'],
insertbackground=cursor_color,
selectforeground=select_colors['foreground'],
selectbackground=select_colors['background'],
)
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configDialog.py
fontWeight='normal'
if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
fontWeight='bold'
self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
idleConf.GetOption('main','EditorWindow','font-size'),
fontWeight))
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
"Update the keybindings after they are changed"
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
self.apply_bindings()
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
self.apply_bindings(xkeydefs)
#update menu accelerators
menuEventDict = {}
for menu in self.Bindings.menudefs:
menuEventDict[menu[0]] = {}
for item in menu[1]:
if item:
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict.keys():
menu = self.menudict[menubarItem]
end = menu.index(END) + 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
if accel:
itemName = menu.entrycget(index, 'label')
event = ''
if menuEventDict.has_key(menubarItem):
if menuEventDict[menubarItem].has_key(itemName):
event = menuEventDict[menubarItem][itemName]
if event:
accel = get_accelerator(keydefs, event)
menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configDialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
def reset_help_menu_entries(self):
"Update the additional help entries on the Help menu"
help_list = idleConf.GetAllExtraHelpSourcesList()
helpmenu = self.menudict['help']
# first delete the extra help entries, if any
helpmenu_length = helpmenu.index(END)
if helpmenu_length > self.base_helpmenu_length:
helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
# then rebuild them
if help_list:
helpmenu.add_separator()
for entry in help_list:
cmd = self.__extra_help_callback(entry[1])
helpmenu.add_command(label=entry[0], command=cmd)
# and update the menu dictionary
self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
"Create a callback with the helpfile value frozen at definition time"
def display_extra_help(helpfile=helpfile):
if not helpfile.startswith(('www', 'http')):
url = os.path.normpath(helpfile)
if sys.platform[:3] == 'win':
os.startfile(helpfile)
else:
webbrowser.open(helpfile)
return display_extra_help
def update_recent_files_list(self, new_file=None):
"Load and update the recent files list and menus"
rf_list = []
if os.path.exists(self.recent_files_path):
rf_list_file = open(self.recent_files_path,'r')
try:
rf_list = rf_list_file.readlines()
finally:
rf_list_file.close()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
rf_list.remove(new_file) # move to top
rf_list.insert(0, new_file)
# clean and save the recent files list
bad_paths = []
for path in rf_list:
if '\0' in path or not os.path.exists(path[0:-1]):
bad_paths.append(path)
rf_list = [path for path in rf_list if path not in bad_paths]
ulchars = "1234567890ABCDEFGHIJK"
rf_list = rf_list[0:len(ulchars)]
rf_file = open(self.recent_files_path, 'w')
try:
rf_file.writelines(rf_list)
finally:
rf_file.close()
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict.keys():
menu = instance.recent_files_menu
menu.delete(1, END) # clear, and rebuild:
for i, file in zip(count(), rf_list):
file_name = file[0:-1] # zap \n
# make unicode string to display non-ASCII chars correctly
ufile_name = self._filename_to_unicode(file_name)
callback = instance.__recent_file_callback(file_name)
menu.add_command(label=ulchars[i] + " " + ufile_name,
command=callback,
underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(filename)
def long_title(self):
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
def getlineno(self, mark="insert"):
    """Return the 1-based line number of *mark* in the text widget."""
    index = self.text.index(mark)
    return int(float(index))
def get_geometry(self):
    """Return the toplevel window geometry as a tuple (width, height, x, y).

    Parses the Tk geometry string (e.g. "800x600+10+20"); the x/y
    offsets may be negative on multi-monitor setups.
    """
    geom = self.top.wm_geometry()
    m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
    # Return an actual tuple as the docstring promises, and do not
    # shadow the builtin name 'tuple' as the old code did.
    return tuple(map(int, m.groups()))
def close_event(self, event):
self.close()
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
def _close(self):
if self.io.filename:
self.update_recent_files_list(new_file=self.io.filename)
WindowList.unregister_callback(self.postwindowsmenu)
self.unload_extensions()
self.io.close()
self.io = None
self.undo = None
if self.color:
self.color.close(False)
self.color = None
self.text = None
self.tkinter_vars = None
self.per.close()
self.per = None
self.top.destroy()
if self.close_hook:
# unless override: unregister from flist, terminate if last window
self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in self.extensions.values():
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print "Failed to load extension", repr(name)
import traceback
traceback.print_exc()
def get_standard_extension_names(self):
return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
try:
mod = __import__(name, globals(), locals(), [])
except ImportError:
print "\nFailed to import extension: ", name
return
cls = getattr(mod, name)
keydefs = idleConf.GetExtensionBindings(name)
if hasattr(cls, "menudefs"):
self.fill_menus(cls.menudefs, keydefs)
ins = cls(self)
self.extensions[name] = ins
if keydefs:
self.apply_bindings(keydefs)
for vevent in keydefs.keys():
methodname = vevent.replace("-", "_")
while methodname[:1] == '<':
methodname = methodname[1:]
while methodname[-1:] == '>':
methodname = methodname[:-1]
methodname = methodname + "_event"
if hasattr(ins, methodname):
self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
if keydefs is None:
keydefs = self.Bindings.default_keydefs
text = self.text
text.keydefs = keydefs
for event, keylist in keydefs.items():
if keylist:
text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
"""Add appropriate entries to the menus and submenus
Menus that are absent or None in self.menudict are ignored.
"""
if menudefs is None:
menudefs = self.Bindings.menudefs
if keydefs is None:
keydefs = self.Bindings.default_keydefs
menudict = self.menudict
text = self.text
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=text, eventname=eventname):
text.event_generate(eventname)
if checkbutton:
var = self.get_var_obj(eventname, BooleanVar)
menu.add_checkbutton(label=label, underline=underline,
command=command, accelerator=accelerator,
variable=var)
else:
menu.add_command(label=label, underline=underline,
command=command,
accelerator=accelerator)
def getvar(self, name):
var = self.get_var_obj(name)
if var:
value = var.get()
return value
else:
raise NameError, name
def setvar(self, name, value, vartype=None):
var = self.get_var_obj(name, vartype)
if var:
var.set(value)
else:
raise NameError, name
def get_var_obj(self, name, vartype=None):
var = self.tkinter_vars.get(name)
if not var and vartype:
# create a Tkinter variable object with self.text as master:
self.tkinter_vars[name] = var = vartype(self.text)
return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
if self.color:
# Return true iff colorizer hasn't (re)gotten this far
# yet, or the character is tagged as being in a string
return self.text.tag_prevrange("TODO", text_index) or \
"STRING" in self.text.tag_names(text_index)
else:
# The colorizer is missing: assume the worst
return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tabwidth(self):
current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tabwidth(self, newtabwidth):
text = self.text
if self.get_tabwidth() != newtabwidth:
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
# If ispythonsource and guess are true, guess a good value for
# indentwidth based on file content (if possible), and if
# indentwidth != tabwidth set usetabs false.
# In any case, adjust the Text widget's view of what a tab
# character means.
def set_indentation_params(self, ispythonsource, guess=True):
if guess and ispythonsource:
i = self.guess_indent()
if 2 <= i <= 8:
self.indentwidth = i
if self.indentwidth != self.tabwidth:
self.usetabs = False
self.set_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
text = self.text
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == '':
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if chars[-1] not in " \t":
# easy: delete preceding real char
text.delete("insert-1c")
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
tabwidth = self.tabwidth
have = len(chars.expandtabs(tabwidth))
assert have > 0
want = ((have - 1) // self.indentwidth) * self.indentwidth
# Debug prompt is multilined....
last_line_of_prompt = sys.ps1.split('\n')[-1]
ncharsdeleted = 0
while 1:
if chars == last_line_of_prompt:
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.undo_block_start()
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", ' ' * (want - have))
text.undo_block_stop()
return "break"
def smart_indent_event(self, event):
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
if index2line(first) != index2line(last):
return self.indent_region_event(event)
text.delete(first, last)
text.mark_set("insert", first)
prefix = text.get("insert linestart", "insert")
raw, effective = classifyws(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self.reindent_to(effective + self.indentwidth)
else:
# tab to the next 'stop' within or to right of line's text:
if self.usetabs:
pad = '\t'
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indentwidth
pad = ' ' * (n - effective % n)
text.insert("insert", pad)
text.see("insert")
return "break"
finally:
text.undo_block_stop()
def newline_and_indent_event(self, event):
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
line = text.get("insert linestart", "insert")
i, n = 0, len(line)
while i < n and line[i] in " \t":
i = i+1
if i == n:
# the cursor is in or at leading indentation in a continuation
# line; just inject an empty line at the start
text.insert("insert linestart", '\n')
return "break"
indent = line[:i]
# strip whitespace before insert point unless it's in the prompt
i = 0
last_line_of_prompt = sys.ps1.split('\n')[-1]
while line and line[-1] in " \t" and line != last_line_of_prompt:
line = line[:-1]
i = i+1
if i:
text.delete("insert - %d chars" % i, "insert")
# strip whitespace after insert point
while text.get("insert") in " \t":
text.delete("insert")
# start new line
text.insert("insert", '\n')
# adjust indentation for continuations and block
# open/close first need to find the last stmt
lno = index2line(text.index('insert'))
y = PyParse.Parser(self.indentwidth, self.tabwidth)
if not self.context_use_ps1:
for context in self.num_context_lines:
startat = max(lno - context, 1)
startatindex = `startat` + ".0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
bod = y.find_good_parse_start(
self.context_use_ps1,
self._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
y.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", "insert")
if r:
startatindex = r[1]
else:
startatindex = "1.0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
y.set_lo(0)
c = y.get_continuation_type()
if c != PyParse.C_NONE:
# The current stmt hasn't ended yet.
if c == PyParse.C_STRING_FIRST_LINE:
# after the first line of a string; do not indent at all
pass
elif c == PyParse.C_STRING_NEXT_LINES:
# inside a string which started before this line;
# just mimic the current indent
text.insert("insert", indent)
elif c == PyParse.C_BRACKET:
# line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket
self.reindent_to(y.compute_bracket_indent())
elif c == PyParse.C_BACKSLASH:
# if more than one line in this stmt already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line
if y.get_num_lines_in_stmt() > 1:
text.insert("insert", indent)
else:
self.reindent_to(y.compute_backslash_indent())
else:
assert 0, "bogus continuation type %r" % (c,)
return "break"
# This line starts a brand new stmt; indent relative to
# indentation of initial line of closest preceding
# interesting stmt.
indent = y.get_base_indent_string()
text.insert("insert", indent)
if y.is_block_opener():
self.smart_indent_event(event)
elif indent and y.is_block_closer():
self.smart_backspace_event(event)
return "break"
finally:
text.see("insert")
text.undo_block_stop()
# Our editwin provides a is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
def inner(offset, _startindex=startindex,
_icis=self.is_char_in_string):
return _icis(_startindex + "+%dc" % offset)
return inner
def indent_region_event(self, event):
    """Shift every nonblank line of the region right one indent level."""
    head, tail, chars, lines = self.get_region()
    for i, line in enumerate(lines):
        if not line:
            continue
        raw, effective = classifyws(line, self.tabwidth)
        # Rebuild the leading whitespace at the new depth.
        lines[i] = self._make_blanks(effective + self.indentwidth) + line[raw:]
    self.set_region(head, tail, chars, lines)
    return "break"
def dedent_region_event(self, event):
    """Shift every nonblank line of the region left one indent level."""
    head, tail, chars, lines = self.get_region()
    for i, line in enumerate(lines):
        if not line:
            continue
        raw, effective = classifyws(line, self.tabwidth)
        # Never dedent past column zero.
        depth = max(effective - self.indentwidth, 0)
        lines[i] = self._make_blanks(depth) + line[raw:]
    self.set_region(head, tail, chars, lines)
    return "break"
def comment_region_event(self, event):
    """Comment out the region by prefixing each line with '##'.

    Iterates only to len(lines) - 1 on purpose: get_region() splits on
    '\\n', so the final list element is the (normally empty) remnant
    after the last newline and must not be commented.
    """
    head, tail, chars, lines = self.get_region()
    for pos in range(len(lines) - 1):
        line = lines[pos]
        lines[pos] = '##' + line
    self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
    """Remove one level of comment markers ('##' or '#') per line."""
    head, tail, chars, lines = self.get_region()
    for i, line in enumerate(lines):
        if not line:
            # Preserve empty lines (including the trailing split remnant).
            continue
        if line.startswith('##'):
            stripped = line[2:]
        elif line.startswith('#'):
            stripped = line[1:]
        else:
            stripped = line
        lines[i] = stripped
    self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
    """Convert leading whitespace in the region to tabs plus spaces."""
    head, tail, chars, lines = self.get_region()
    tabwidth = self._asktabwidth()
    for i, line in enumerate(lines):
        if not line:
            continue
        raw, effective = classifyws(line, tabwidth)
        # Maximal run of tabs, then the leftover spaces.
        ntabs, nspaces = divmod(effective, tabwidth)
        lines[i] = '\t' * ntabs + ' ' * nspaces + line[raw:]
    self.set_region(head, tail, chars, lines)
def untabify_region_event(self, event):
    """Expand every tab in the region to spaces."""
    head, tail, chars, lines = self.get_region()
    tabwidth = self._asktabwidth()
    lines[:] = [line.expandtabs(tabwidth) for line in lines]
    self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
    """Ask the user, then toggle tab/space indentation for this window."""
    if self.usetabs:
        verb, note = "off", "remains at"
    else:
        verb, note = "on", "will be"
    prompt = ("Turn tabs " + verb + "?\nIndent width " + note + " 8." +
              "\n Note: a tab is always 8 columns")
    if self.askyesno("Toggle tabs", prompt, parent=self.text):
        self.usetabs = not self.usetabs
        # Pin the indent width to 8 to avoid inconsistent indentation;
        # the user must change it manually afterwards if desired.
        self.indentwidth = 8
    return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
    """Prompt for and apply a new indent width (ignored in tabs mode)."""
    chosen = self.askinteger(
        "Indent width",
        "New indent width (2-16)\n(Always use 8 when using tabs)",
        parent=self.text, initialvalue=self.indentwidth,
        minvalue=2, maxvalue=16)
    # Ignore a cancel (None), a no-op value, or tab mode (tabs pin 8).
    changed = bool(chosen) and chosen != self.indentwidth
    if changed and not self.usetabs:
        self.indentwidth = chosen
    return "break"
def get_region(self):
    """Return (head, tail, chars, lines) for the selection or current line.

    head/tail are Tk indices spanning whole lines; chars is the raw
    text between them, and lines is chars split on newlines (the final
    element is the remnant after the last newline).
    """
    text = self.text
    first, last = self.get_selection_indices()
    if first and last:
        head = text.index(first + " linestart")
        tail = text.index(last + "-1c lineend +1c")
    else:
        head = text.index("insert linestart")
        tail = text.index("insert lineend +1c")
    chars = text.get(head, tail)
    return head, tail, chars, chars.split("\n")
def set_region(self, head, tail, chars, lines):
    """Replace the text between head and tail with *lines*, reselecting it.

    Rings the bell and returns early when the joined text equals the
    original *chars*, so a no-op edit never dirties the undo stack.
    The delete/insert pair is bracketed as a single undoable action.
    """
    text = self.text
    newchars = "\n".join(lines)
    if newchars == chars:
        text.bell()
        return
    text.tag_remove("sel", "1.0", "end")
    text.mark_set("insert", head)
    text.undo_block_start()
    text.delete(head, tail)
    text.insert(head, newchars)
    text.undo_block_stop()
    # Reselect the replaced region for further region commands.
    text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
    """Reindent the current line's head to display as *column* columns.

    Deletes everything from the line start to the insert point, then
    inserts whitespace (tabs if configured) displaying as *column*
    columns, all inside one undo block.
    """
    text = self.text
    text.undo_block_start()
    if text.compare("insert linestart", "!=", "insert"):
        text.delete("insert linestart", "insert")
    if column:
        text.insert("insert", self._make_blanks(column))
    text.undo_block_stop()
def _asktabwidth(self):
return self.askinteger(
"Tab width",
"Columns per tab? (2-16)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16) or self.tabwidth
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
    """Guess the buffer's indent width from its first indented block.

    Returns 0 when no block opener + indented statement pair is found;
    callers should only trust values in a sane range (roughly 2-8).
    """
    opener, indented = IndentSearcher(self.text, self.tabwidth).run()
    if not (opener and indented):
        return 0
    raw, small = classifyws(opener, self.tabwidth)
    raw, large = classifyws(indented, self.tabwidth)
    return large - small
# "line.col" -> line, as an int
def index2line(index):
    """Extract the line number from a Tk "line.col" index string."""
    return int(index.split('.')[0])
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
# effective # of leading blanks after expanding
# tabs to width tabwidth)
def classifyws(s, tabwidth):
    """Measure the leading whitespace of *s*.

    Returns (raw, effective): the number of leading space/tab
    characters, and the display width they occupy after expanding tabs
    to *tabwidth* columns.
    """
    raw = 0
    effective = 0
    for ch in s:
        if ch == ' ':
            effective += 1
        elif ch == '\t':
            # Advance to the next tab stop.
            effective = (effective // tabwidth + 1) * tabwidth
        else:
            break
        raw += 1
    return raw, effective
# Keep a private alias to the tokenize module and free the public name.
# IndentSearcher below pokes at _tokenize.tabsize, which is
# module-global state, so the alias makes that dependency explicit.
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
    """Scan the Text widget for the first block opener and the first
    indented statement after it.

    run() returns the pair (opener_line, indented_line); either or both
    may be None.  guess_indent() uses the pair to infer the buffer's
    indent width.
    """

    def __init__(self, text, tabwidth):
        self.text = text
        self.tabwidth = tabwidth
        # i: last widget line fed to the tokenizer; finished: stop flag.
        self.i = self.finished = 0
        self.blkopenline = self.indentedline = None

    def readline(self):
        """Feed the tokenizer one widget line per call; "" signals EOF."""
        if self.finished:
            return ""
        i = self.i = self.i + 1
        mark = repr(i) + ".0"
        if self.text.compare(mark, ">=", "end"):
            return ""
        return self.text.get(mark, mark + " lineend+1c")

    def tokeneater(self, type, token, start, end, line,
                   INDENT=_tokenize.INDENT,
                   NAME=_tokenize.NAME,
                   OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
        # Remember the first block-opener line seen, then finish at the
        # first INDENT token that follows it.
        if self.finished:
            pass
        elif type == NAME and token in OPENERS:
            self.blkopenline = line
        elif type == INDENT and self.blkopenline:
            self.indentedline = line
            self.finished = 1

    def run(self):
        # Temporarily force the tokenizer's tab size to ours; this is
        # module-global state, so restore it no matter what happens.
        save_tabsize = _tokenize.tabsize
        _tokenize.tabsize = self.tabwidth
        try:
            try:
                # NOTE(review): legacy (Python 2 style) tokenize API --
                # a readline callback plus a tokeneater callback.
                _tokenize.tokenize(self.readline, self.tokeneater)
            except _tokenize.TokenError:
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            _tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
    """Strip the first underscore from *s*.

    Returns (pos, label): pos is the index the underscore occupied
    (-1 if absent), used as the menu-accelerator underline position.
    e.g. prepstr("Co_py") -> (2, "Copy").
    """
    pos = s.find('_')
    if pos < 0:
        return pos, s
    return pos, s[:pos] + s[pos + 1:]
# Map Tk keysym names to the printable characters shown in menu
# accelerator labels (used by get_accelerator below).
keynames = {
    'bracketleft': '[',
    'bracketright': ']',
    'slash': '/',
}
def get_accelerator(keydefs, eventname):
    """Return a human-readable accelerator string for *eventname*.

    Takes the first key sequence bound to the event and rewrites it
    from Tk syntax (e.g. "<Control-Key-x>") to menu form ("Ctrl+X").
    Returns "" when the event has no binding.
    """
    bindings = keydefs.get(eventname)
    if not bindings:
        return ""
    s = bindings[0]
    # Capitalize single-letter key names: "-x" -> "-X".
    s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
    # Replace keysym names with their printable characters.
    s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
    for pattern, repl in (("Key-", ""),
                          ("Cancel", "Ctrl-Break"),   # dscherer@cmu.edu
                          ("Control-", "Ctrl-"),
                          ("-", "+"),
                          ("><", " "),
                          ("<", ""),
                          (">", "")):
        s = re.sub(pattern, repl, s)
    return s
def fixwordbreaks(root):
    """Configure Tcl's word-character classes so that double-click and
    next/previous-word motions treat a Python identifier as one word."""
    tk = root.tk
    # Force word.tcl to load before we override its variables.
    tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
    tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
    tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def test():
    """Smoke test: open an EditorWindow on sys.argv[1] (or untitled)
    and run the Tk main loop until all windows close."""
    root = Tk()
    fixwordbreaks(root)
    # Hide the default root window; only the editor window is shown.
    root.withdraw()
    if sys.argv[1:]:
        filename = sys.argv[1]
    else:
        filename = None
    edit = EditorWindow(root=root, filename=filename)
    edit.set_close_hook(root.quit)
    edit.text.bind("<<close-all-windows>>", edit.close_event)
    root.mainloop()
    root.destroy()
if __name__ == '__main__':
test()
| apache-2.0 |
minhphung171093/GreenERP_V8 | openerp/addons/purchase_double_validation/purchase_double_validation_installer.py | 432 | 2315 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_config_settings(osv.osv_memory):
    """Configuration wizard exposing the purchase double-validation
    threshold.

    The limit is persisted by rewriting the condition strings of the
    two workflow transitions that route a confirmed purchase order
    either straight to approval or through a second validation step.
    """
    _inherit = 'purchase.config.settings'
    _columns = {
        'limit_amount': fields.integer('limit to require a second approval',required=True,
            help="Amount after which validation of purchase is required."),
    }
    _defaults = {
        'limit_amount': 5000,
    }

    def get_default_limit_amount(self, cr, uid, fields, context=None):
        # Recover the current threshold from the "less than" transition's
        # condition string, e.g. "amount_total < 5000".
        ir_model_data = self.pool.get('ir.model.data')
        transition = ir_model_data.get_object(cr, uid, 'purchase_double_validation', 'trans_confirmed_double_lt')
        field, value = transition.condition.split('<', 1)
        return {'limit_amount': int(value)}

    def set_limit_amount(self, cr, uid, ids, context=None):
        # Rewrite both workflow transition conditions so they partition
        # exactly on the newly configured amount.
        ir_model_data = self.pool.get('ir.model.data')
        config = self.browse(cr, uid, ids[0], context)
        waiting = ir_model_data.get_object(cr, uid, 'purchase_double_validation', 'trans_confirmed_double_gt')
        waiting.write({'condition': 'amount_total >= %s' % config.limit_amount})
        confirm = ir_model_data.get_object(cr, uid, 'purchase_double_validation', 'trans_confirmed_double_lt')
        confirm.write({'condition': 'amount_total < %s' % config.limit_amount})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Dawny33/luigi | luigi/contrib/sge.py | 29 | 11318 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
SunGrid Engine is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
import luigi
import luigi.hadoop
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _clean_task_id(task_id):
"""Clean the task ID so qsub allows it as a "name" string."""
for c in ['\n', '\t', '\r', '/', ':', '@', '\\', '*', '?', ',', '=', ' ', '(', ')']:
task_id = task_id.replace(c, '-')
return task_id
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
class SGEJobTask(luigi.Task):
    """Base class for executing a job on SunGrid Engine.

    Override ``work()`` (rather than ``run()``) with your job code.

    Parameters:

    - n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
      value is passed as ``qsub -pe {pe} {n_cpu}``
    - parallel_env: SGE parallel environment name. The default is "orte",
      the parallel environment installed with MIT StarCluster. If you
      are using a different cluster environment, check with your
      sysadmin for the right pe to use. This value is passed as {pe}
      to the qsub command above.
    - shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
      Task classes and dependencies are pickled to a temporary folder on
      this drive. The default is ``/home``, the NFS share location setup
      by StarCluster
    """

    n_cpu = luigi.IntParameter(default=2, significant=False)
    shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
    parallel_env = luigi.Parameter(default='orte', significant=False)

    def _fetch_task_failures(self):
        """Return the lines of the job's stderr file ([] when absent/empty).

        SGE writes 'stdin: is not a tty' when a job arrives through a pipe
        (as _build_qsub_command submits it), so that first line is dropped.
        """
        if not os.path.exists(self.errfile):
            logger.info('No error file')
            return []
        with open(self.errfile, "r") as f:
            errors = f.readlines()
        if errors == []:
            return errors
        if errors[0].strip() == 'stdin: is not a tty':  # SGE complains when we submit through a pipe
            errors.pop(0)
        return errors

    def _init_local(self):
        """Stage the pickled task and its dependencies onto the shared drive."""
        # Set up temp folder in shared directory (trim to max filename length)
        base_tmp_dir = self.shared_tmp_dir
        random_id = '%016x' % random.getrandbits(64)
        folder_name = _clean_task_id(self.task_id) + '-' + random_id
        self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
        # NOTE(review): f_namemax is the maximum length of a single path
        # *component*, and fstatvfs(0) inspects the filesystem of stdin's
        # descriptor, yet the whole joined path is truncated here — confirm
        # this is the intended safeguard.
        max_filename_length = os.fstatvfs(0).f_namemax
        self.tmp_dir = self.tmp_dir[:max_filename_length]
        logger.info("Tmp dir: %s", self.tmp_dir)
        os.makedirs(self.tmp_dir)

        # Dump the code to be run into a pickle file
        logging.debug("Dumping pickled class")
        self._dump(self.tmp_dir)

        # Make sure that all the class's dependencies are tarred and available
        logging.debug("Tarballing dependencies")
        # Grab luigi and the module containing the code to be run
        packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
        luigi.hadoop.create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))

    def run(self):
        """Luigi entry point: stage files locally, then submit via qsub."""
        self._init_local()
        self._run_job()
        # The procedure:
        # - Pickle the class
        # - Tarball the dependencies
        # - Construct a qsub argument that runs a generic runner function with the path to the pickled class
        # - Runner function loads the class from pickle
        # - Runner class untars the dependencies
        # - Runner function hits the button on the class's work() method

    def work(self):
        """Override this method, rather than ``run()``, for your actual work."""
        pass

    def _dump(self, out_dir=''):
        """Dump instance to file so the remote runner can unpickle it."""
        self.job_file = os.path.join(out_dir, 'job-instance.pickle')
        if self.__module__ == '__main__':
            # Classes defined in __main__ cannot be unpickled on the worker;
            # rewrite the module reference inside the pickle stream to the
            # actual script module name.
            d = pickle.dumps(self)
            module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
            d = d.replace('(c__main__', "(c" + module_name)
            # NOTE(review): the file handle is never closed explicitly, and
            # text mode ("w") assumes a str pickle (protocol 0 on Python 2)
            # — confirm before running this under Python 3.
            open(self.job_file, "w").write(d)
        else:
            pickle.dump(self, open(self.job_file, "w"))

    def _run_job(self):
        """Submit the staged job through ``qsub`` and block until it ends."""
        # Build a qsub argument that will run sge_runner.py on the directory we've specified
        runner_path = sge_runner.__file__
        if runner_path.endswith("pyc"):
            runner_path = runner_path[:-3] + "py"
        job_str = 'python {0} "{1}"'.format(runner_path, self.tmp_dir)  # enclose tmp_dir in quotes to protect from special escape chars

        # Build qsub submit command
        self.outfile = os.path.join(self.tmp_dir, 'job.out')
        self.errfile = os.path.join(self.tmp_dir, 'job.err')
        submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
                                         self.errfile, self.parallel_env, self.n_cpu)
        logger.debug('qsub command: \n' + submit_cmd)

        # Submit the job and grab job ID
        # NOTE(review): subprocess.check_output returns bytes on Python 3;
        # the parsing/concatenation below assumes text — confirm the target
        # interpreter.
        output = subprocess.check_output(submit_cmd, shell=True)
        self.job_id = _parse_qsub_job_id(output)
        logger.debug("Submitted job to qsub with response:\n" + output)
        self._track_job()

        # Now delete the temporaries, if they're there.
        # NOTE(review): this cleanup is skipped if _track_job raises, leaving
        # the staging directory behind on the shared drive.
        if self.tmp_dir and os.path.exists(self.tmp_dir):
            logger.info('Removing temporary directory %s' % self.tmp_dir)
            shutil.rmtree(self.tmp_dir)

    def _track_job(self):
        """Poll ``qstat`` every POLL_TIME seconds until the job finishes."""
        while True:
            # Sleep for a little bit
            time.sleep(POLL_TIME)

            # See what the job's up to
            # ASSUMPTION
            qstat_out = subprocess.check_output(['qstat'])
            sge_status = _parse_qstat_state(qstat_out, self.job_id)
            if sge_status == 'r':
                logger.info('Job is running...')
            elif sge_status == 'qw':
                logger.info('Job is pending...')
            elif 'E' in sge_status:
                logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
                break
            elif sge_status == 't' or sge_status == 'u':
                # Then the job could either be failed or done.
                errors = self._fetch_task_failures()
                if not errors:
                    logger.info('Job is done')
                else:
                    logger.error('Job has FAILED:\n' + '\n'.join(errors))
                break
            else:
                logger.info('Job status is UNKNOWN!')
                logger.info('Status is : %s' % sge_status)
                raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
    """A local version of SGEJobTask, for easier debugging.

    This version skips the ``qsub`` steps and simply runs ``work()``
    on the local node, so you don't need to be on an SGE cluster to
    use your Task in a test workflow.
    """
    def run(self):
        # Bypass pickling/staging/qsub entirely; execute in-process.
        self.work()
| apache-2.0 |
srsman/odoo | addons/stock/tests/test_resupply.py | 214 | 2457 | # -*- coding: utf-8 -*-
from openerp.addons.stock.tests.common import TestStockCommon
from openerp.tools import mute_logger, float_round
class TestResupply(TestStockCommon):
    """Check inter-warehouse resupply driven by a minimum stock rule."""

    def setUp(self):
        """Create BIG and SMALL warehouses (SMALL resupplies from BIG), a
        reordering rule for product A on SMALL, and 1000 units of A on BIG."""
        super(TestResupply, self).setUp()
        self.Warehouse = self.env['stock.warehouse']
        # create 2 WH, BIG and SMALL
        # SMALL resupplies from BIG
        self.bigwh = self.Warehouse.create({'name': 'BIG', 'code': 'B'})
        self.smallwh = self.Warehouse.create({'name': 'SMALL', 'code': 'S',
                                              'default_resupply_wh_id': self.bigwh.id,
                                              'resupply_wh_ids': [(6, 0, [self.bigwh.id])],
                                              })
        # minimum stock rule for Product A on SMALL
        Orderpoint = self.env['stock.warehouse.orderpoint']
        Orderpoint.create({'warehouse_id': self.smallwh.id,
                           'location_id': self.smallwh.lot_stock_id.id,
                           'product_id': self.productA.id,
                           'product_min_qty': 100,
                           'product_max_qty': 200,
                           'product_uom': self.uom_unit.id,
                           })
        # create some stock on BIG
        Wiz = self.env['stock.change.product.qty']
        wiz = Wiz.create({'product_id': self.productA.id,
                          'new_quantity': 1000,
                          'location_id': self.bigwh.lot_stock_id.id,
                          })
        wiz.change_product_qty()

    def test_resupply_from_wh(self):
        """Run the scheduler and verify the two generated procurements."""
        sched = self.env['procurement.order']
        sched.run_scheduler()
        # we generated 2 procurements for product A: one on small wh and the
        # other one on the transit location
        procs = sched.search([('product_id', '=', self.productA.id)])
        self.assertEqual(len(procs), 2)
        proc1 = sched.search([('product_id', '=', self.productA.id),
                              ('warehouse_id', '=', self.smallwh.id)])
        self.assertEqual(proc1.state, 'running')
        proc2 = sched.search([('product_id', '=', self.productA.id),
                              ('warehouse_id', '=', self.bigwh.id)])
        # The second procurement feeds the inter-warehouse transit location.
        self.assertEqual(proc2.location_id.usage, 'transit')
        self.assertNotEqual(proc2.state, 'exception')
        proc2.run()
        self.assertEqual(proc2.state, 'running')
        self.assertTrue(proc2.rule_id)
| agpl-3.0 |
draperjames/qtpandas | qtpandas/ui/fallback/easygui/boxes/updatable_text_box.py | 1 | 8791 | """
.. moduleauthor:: Stephen Raymond Ferg and Robert Lugg (active)
.. default-domain:: py
.. highlight:: python
Version |release|
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import str
from builtins import int
from future import standard_library
standard_library.install_aliases()
import sys
if sys.hexversion >= 0x020600F0:
runningPython26 = True
else:
runningPython26 = False
if sys.hexversion >= 0x030000F0:
runningPython3 = True
else:
runningPython3 = False
# Try to import the Python Image Library. If it doesn't exist, only .gif
# images are supported.
try:
from PIL import Image as PILImage
from PIL import ImageTk as PILImageTk
except:
pass
if runningPython3:
from tkinter import *
import tkinter.filedialog as tk_FileDialog
from io import StringIO
else:
from tkinter import *
import tkinter.filedialog as tk_FileDialog
from io import StringIO
# Set up basestring appropriately
if runningPython3:
str = str
if TkVersion < 8.0:
stars = "*" * 75
print(("""\n\n\n""" + stars + """
You are running Tk version: """ + str(TkVersion) + """
You must be using Tk version 8.0 or greater to use EasyGui.
Terminating.
""" + stars + """\n\n\n"""))
sys.exit(0)
rootWindowPosition = "+300+200"  # default window geometry; recomputed per dialog

# Font families/sizes shared by the easygui dialogs.
PROPORTIONAL_FONT_FAMILY = ("MS", "Sans", "Serif")
MONOSPACE_FONT_FAMILY = ("Courier")
PROPORTIONAL_FONT_SIZE = 10
# a little smaller, because it is more legible at a smaller size
MONOSPACE_FONT_SIZE = 9
TEXT_ENTRY_FONT_SIZE = 12  # a little larger makes it easier to see

STANDARD_SELECTION_EVENTS = ["Return", "Button-1", "space"]

# Initialize some global variables that will be reset later
__choiceboxMultipleSelect = None
__replyButtonText = None
__choiceboxResults = None
__firstWidget = None
__enterboxText = None
__enterboxDefaultText = ""
__multenterboxText = ""
choiceboxChoices = None
choiceboxWidget = None
entryWidget = None
boxRoot = None
#-------------------------------------------------------------------
# textbox
#-------------------------------------------------------------------
def textbox(msg="", title=" ", text="", codebox=0, get_updated_text=None):
    """
    Display some text in a proportional font with line wrapping at word breaks.
    This function is suitable for displaying general written text.

    The text parameter should be a string, or a list or tuple of lines to be
    displayed in the textbox.

    :param str msg: the msg to be displayed
    :param str title: the window title
    :param str text: what to display in the textbox
    :param str codebox: if 1, act as a codebox (monospace font, no word wrap)
    :param callable get_updated_text: called with no arguments when the
        Update button is activated; its return value replaces the displayed
        text
    :return: the final contents of the text area when the window is closed
    """
    if msg is None:
        msg = ""
    if title is None:
        title = ""

    global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
    global rootWindowPosition
    choices = ["OK"]
    __replyButtonText = choices[0]

    boxRoot = Tk()
    # Quit when x button pressed
    boxRoot.protocol('WM_DELETE_WINDOW', boxRoot.quit)

    # Size the dialog relative to the screen.
    screen_width = boxRoot.winfo_screenwidth()
    screen_height = boxRoot.winfo_screenheight()
    root_width = int((screen_width * 0.8))
    root_height = int((screen_height * 0.5))
    root_xpos = int((screen_width * 0.1))
    root_ypos = int((screen_height * 0.05))

    boxRoot.title(title)
    boxRoot.iconname('Dialog')
    rootWindowPosition = "+0+0"
    boxRoot.geometry(rootWindowPosition)
    boxRoot.expand = NO
    boxRoot.minsize(root_width, root_height)
    rootWindowPosition = '+{0}+{1}'.format(root_xpos, root_ypos)
    boxRoot.geometry(rootWindowPosition)

    mainframe = Frame(master=boxRoot)
    mainframe.pack(side=TOP, fill=BOTH, expand=YES)

    # ---- put frames in the window -----------------------------------
    # we pack the textboxFrame first, so it will expand first
    textboxFrame = Frame(mainframe, borderwidth=3)
    textboxFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)

    message_and_buttonsFrame = Frame(mainframe)
    message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)

    messageFrame = Frame(message_and_buttonsFrame)
    messageFrame.pack(side=LEFT, fill=X, expand=YES)

    buttonsFrame = Frame(message_and_buttonsFrame)
    buttonsFrame.pack(side=RIGHT, expand=NO)

    # -------------------- put widgets in the frames --------------------
    # put a textArea in the top frame
    if codebox:
        character_width = int((root_width * 0.6) / MONOSPACE_FONT_SIZE)
        textArea = Text(
            textboxFrame, height=25, width=character_width, padx="2m", pady="1m")
        textArea.configure(wrap=NONE)
        textArea.configure(font=(MONOSPACE_FONT_FAMILY, MONOSPACE_FONT_SIZE))
    else:
        character_width = int((root_width * 0.6) / MONOSPACE_FONT_SIZE)
        textArea = Text(
            textboxFrame, height=25, width=character_width, padx="2m", pady="1m"
        )
        textArea.configure(wrap=WORD)
        textArea.configure(
            font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))

    # some simple keybindings for scrolling
    # (bugfix: bind() expects a callable; the previous code *called*
    # yview_scroll()/xview_scroll() immediately and bound the None result,
    # so the navigation keys never scrolled anything)
    mainframe.bind("<Next>", lambda event: textArea.yview_scroll(1, PAGES))
    mainframe.bind("<Prior>", lambda event: textArea.yview_scroll(-1, PAGES))
    mainframe.bind("<Right>", lambda event: textArea.xview_scroll(1, PAGES))
    mainframe.bind("<Left>", lambda event: textArea.xview_scroll(-1, PAGES))
    mainframe.bind("<Down>", lambda event: textArea.yview_scroll(1, UNITS))
    mainframe.bind("<Up>", lambda event: textArea.yview_scroll(-1, UNITS))

    # add a vertical scrollbar to the frame
    rightScrollbar = Scrollbar(
        textboxFrame, orient=VERTICAL, command=textArea.yview)
    textArea.configure(yscrollcommand=rightScrollbar.set)
    # add a horizontal scrollbar to the frame
    bottomScrollbar = Scrollbar(
        textboxFrame, orient=HORIZONTAL, command=textArea.xview)
    textArea.configure(xscrollcommand=bottomScrollbar.set)

    # pack the textArea and the scrollbars. Note that although we must define
    # the textArea first, we must pack it last, so that the bottomScrollbar will
    # be located properly.

    # Note that we need a bottom scrollbar only for code.
    # Text will be displayed with wordwrap, so we don't need to have a horizontal
    # scroll for it.
    if codebox:
        bottomScrollbar.pack(side=BOTTOM, fill=X)
    rightScrollbar.pack(side=RIGHT, fill=Y)
    textArea.pack(side=LEFT, fill=BOTH, expand=YES)

    # ---------- put a msg widget in the msg frame-------------------
    messageWidget = Message(
        messageFrame, anchor=NW, text=msg, width=int(root_width * 0.9))
    messageWidget.configure(
        font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
    messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')

    # put the buttons in the buttonsFrame
    okButton = Button(
        buttonsFrame, takefocus=YES, text="Update", height=1, width=6)
    okButton.pack(
        expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")

    def __update_myself(event):
        # Replace the whole text area with fresh content from the
        # caller-supplied callback.
        new_text = get_updated_text()
        textArea.delete(1.0, END)
        textArea.insert('end', new_text, "normal")

    # for the commandButton, bind activation events to the activation event
    # handler
    commandButton = okButton
    handler = __textboxOK
    # NOTE(review): the reassignment below overrides __textboxOK, so the
    # button only refreshes the text; the dialog is closed via the window
    # manager's x button. Confirm this is intended for the updatable variant.
    handler = __update_myself
    for selectionEvent in ["Return", "Button-1", "Escape"]:
        commandButton.bind("<%s>" % selectionEvent, handler)

    # ----------------- the action begins ------------------------------------
    try:
        # load the text into the textArea
        if isinstance(text, str):
            pass
        else:
            try:
                text = "".join(text)  # convert a list or a tuple to a string
            except:
                msgbox(
                    "Exception when trying to convert {} to text in textArea".format(type(text)))
                sys.exit(16)
        textArea.insert('end', text, "normal")
    except:
        msgbox("Exception when trying to load the textArea.")
        sys.exit(16)

    try:
        okButton.focus_force()
    except:
        msgbox("Exception when trying to put focus on okButton.")
        sys.exit(16)

    boxRoot.mainloop()

    # this line MUST go before the line that destroys boxRoot
    areaText = textArea.get(0.0, 'end-1c')
    boxRoot.destroy()
    return areaText  # return __replyButtonText
def __textboxOK(event):
    """Stop the textbox mainloop (close handler).

    NOTE(review): inside textbox() this handler is assigned and then
    immediately replaced by the update handler, so it is currently never
    bound to the button — confirm whether that is intentional.
    """
    global boxRoot
    boxRoot.quit()
def update(reply=None):
    """Default refresh callback for the demo textbox.

    Ignores ``reply`` and returns the replacement text to display.
    """
    message = "To close, use the x button"
    return message
def _demo_textbox():
    """Manual demo: open an updatable textbox wired to update().

    (Fixed: the final print line had stray non-Python residue appended,
    which made the module unparseable.)
    """
    title = "Demo of updatable textbox"
    msg = "Push update button to update. " * 16
    text_snippet = ((
        "Update button!!!. " * 5) + "\n\n") * 10
    reply = textbox(msg, title, text_snippet, get_updated_text=update)
    print(("Reply was: {!s}".format(reply)))
zploskey/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/etree.py | 658 | 4613 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import gettext
_ = gettext.gettext
import re
from six import text_type
from . import _base
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Return (via ``locals()``) a namespace holding a TreeWalker bound to the
    given ElementTree implementation; consumed by ``moduleFactoryFactory``."""
    ElementTree = ElementTreeImplementation
    # The tag used for comment nodes is an implementation detail; create one
    # throwaway comment to learn it.
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:

        1. The current element

        2. The index of the element relative to its parent

        3. A stack of ancestor elements

        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Classify a node and return the (type, data...) tuple expected
            # by the NonRecursiveTreeWalker base class.
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt

            if not(hasattr(node, "tag")):
                # An ElementTree instance rather than an Element: use its root.
                node = node.getroot()

            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)

            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))

            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text

            else:
                assert type(node.tag) == text_type, type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    # Tag of the form '{namespace}localname'.
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                # Final field flags "has content": the child count, or the
                # leading text when there are no child elements.
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)

        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None

            if flag in ("text", "tail"):
                # Text nodes have no children.
                return None
            else:
                if element.text:
                    # The element's leading text is its first "child".
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None

        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                # After the leading text comes the first child element.
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element comes its tail text (once), then the next
                # sibling in the parent's child list.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None

        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                if not parents:
                    return element
                else:
                    # Step from the text node back to its owning element.
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None

    # Hand every name defined here (ElementTree, TreeWalker, ...) to the
    # module factory, which builds a per-implementation module from it.
    return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| mpl-2.0 |
andela-bojengwa/talk | venv/lib/python2.7/site-packages/django/contrib/gis/geos/factory.py | 293 | 1033 | from django.contrib.gis import memoryview
from django.contrib.gis.geos.geometry import GEOSGeometry, wkt_regex, hex_regex
from django.utils import six
def fromfile(file_h):
    """
    Given a string file name, returns a GEOSGeometry. The file may contain WKB,
    WKT, or HEX.
    """
    # Accept either an open file-like object or a path string.
    if isinstance(file_h, six.string_types):
        with open(file_h, 'rb') as handle:
            buf = handle.read()
    else:
        buf = file_h.read()

    # Textual formats (WKT / HEX) are detected by regex after decoding;
    # anything else is treated as WKB wrapped in a memoryview().
    if isinstance(buf, bytes):
        try:
            decoded = buf.decode()
        except UnicodeDecodeError:
            pass
        else:
            if wkt_regex.match(decoded) or hex_regex.match(decoded):
                return GEOSGeometry(decoded)
    else:
        return GEOSGeometry(buf)

    return GEOSGeometry(memoryview(buf))
def fromstr(string, **kwargs):
    "Given a string value, returns a GEOSGeometry object."
    # Thin convenience wrapper; kwargs (e.g. srid) pass through unchanged.
    return GEOSGeometry(string, **kwargs)
| mit |
mikedchavez1010/XX-Net | python27/1.0/lib/win32/gevent/lock.py | 22 | 3074 | # Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
"""Locking primitives"""
from gevent.hub import getcurrent
from gevent._semaphore import Semaphore
__all__ = ['Semaphore', 'DummySemaphore', 'BoundedSemaphore', 'RLock']
class DummySemaphore(object):
    # XXX what is this used for?
    """A semaphore that never blocks.

    Behaves as if constructed with an "infinite" initial value: every method
    returns immediately, so it can stand in wherever a Semaphore-like object
    is required but no actual limiting is wanted.
    """

    def __str__(self):
        return '<%s>' % type(self).__name__

    def locked(self):
        """A dummy semaphore can never be locked."""
        return False

    def release(self):
        """Releasing has no effect."""
        return None

    def rawlink(self, callback):
        # XXX should still work and notify?
        return None

    def unlink(self, callback):
        """Accepted for interface compatibility; nothing to unlink."""
        return None

    def wait(self, timeout=None):
        """Return immediately; there is never anything to wait for."""
        return None

    def acquire(self, blocking=True, timeout=None):
        """Always "succeeds" immediately without blocking."""
        return None

    def __enter__(self):
        return None

    def __exit__(self, typ, val, tb):
        return None
class BoundedSemaphore(Semaphore):
    """A bounded semaphore checks to make sure its current value doesn't exceed its initial value.

    If it does, ``ValueError`` is raised. In most situations semaphores are used to guard resources
    with limited capacity. If the semaphore is released too many times it's a sign of a bug.

    If not given, *value* defaults to 1.
    """

    def __init__(self, value=1):
        Semaphore.__init__(self, value)
        # Remember the ceiling so release() can detect over-release.
        self._initial_value = value

    def release(self):
        # ``counter`` is maintained by the base Semaphore; at or above the
        # initial value, every acquire() has already been balanced out.
        if self.counter >= self._initial_value:
            raise ValueError("Semaphore released too many times")
        return Semaphore.release(self)
class RLock(object):
    """A reentrant lock for greenlets.

    The greenlet holding the lock may acquire() it again without blocking;
    release() must be called once per acquire() before the lock is freed.
    """

    def __init__(self):
        # Underlying binary semaphore that provides the actual blocking.
        self._block = Semaphore(1)
        # Greenlet currently holding the lock (None when unowned).
        self._owner = None
        # Recursion depth of the owner's acquire() calls.
        self._count = 0

    def __repr__(self):
        # NOTE(review): stray ')' before '>' in this format string — confirm
        # whether the repr is meant to read "...)>".
        return "<%s at 0x%x _block=%s _count=%r _owner=%r)>" % (
            self.__class__.__name__,
            id(self),
            self._block,
            self._count,
            self._owner)

    def acquire(self, blocking=1):
        """Acquire the lock; reentrant for the current owner.

        Returns 1 on a recursive acquire, otherwise whatever the underlying
        semaphore's acquire() returns (falsy when non-blocking and held).
        """
        me = getcurrent()
        if self._owner is me:
            self._count = self._count + 1
            return 1
        rc = self._block.acquire(blocking)
        if rc:
            self._owner = me
            self._count = 1
        return rc

    def __enter__(self):
        return self.acquire()

    def release(self):
        """Release one level; only the owning greenlet may call this."""
        if self._owner is not getcurrent():
            raise RuntimeError("cannot release un-aquired lock")
        self._count = count = self._count - 1
        if not count:
            # Fully released: hand the lock back.
            self._owner = None
            self._block.release()

    def __exit__(self, typ, value, tb):
        self.release()

    # Internal methods used by condition variables

    def _acquire_restore(self, count_owner):
        # Reacquire and restore a (count, owner) state from _release_save().
        count, owner = count_owner
        self._block.acquire()
        self._count = count
        self._owner = owner

    def _release_save(self):
        # Fully release the lock and return the (count, owner) state so a
        # condition variable can restore it later.
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)

    def _is_owned(self):
        return self._owner is getcurrent()
| bsd-2-clause |
apophys/err | errbot/storage/base.py | 8 | 2062 | from abc import abstractmethod
from typing import Any, Iterable
class StorageBase(object):
    """
    Contract to implement a storage.

    NOTE(review): the methods are decorated with @abstractmethod but the
    class neither inherits ``abc.ABC`` nor sets ``metaclass=ABCMeta``, so
    Python will not actually prevent instantiation of incomplete
    subclasses — confirm whether enforcement was intended.
    """

    @abstractmethod
    def set(self, key: str, value: Any) -> None:
        """
        Atomically set the key to the given value.
        The caller of set will protect against set on non open.

        :param key: string as key
        :param value: picklable python object
        """
        pass

    @abstractmethod
    def get(self, key: str) -> Any:
        """
        Get the value stored for key. Raises KeyError if the key doesn't exist.
        The caller of get will protect against get on non open.

        :param key: the key
        :return: the value
        """
        pass

    @abstractmethod
    def remove(self, key: str) -> None:
        """
        Remove key. Raises KeyError if the key doesn't exist.
        The caller of remove will protect against remove on non open.

        :param key: the key
        """
        pass

    @abstractmethod
    def len(self) -> int:
        """
        :return: the number of keys set.
        """
        pass

    @abstractmethod
    def keys(self) -> Iterable[str]:
        """
        :return: an iterator on all the entries
        """
        pass

    @abstractmethod
    def close(self) -> None:
        """
        Sync and close the storage.
        The caller of close will protect against close on non open and double close.
        """
        pass
class StoragePluginBase(object):
    """
    Base to implement a storage plugin.
    This is a factory for the namespaces.
    """

    def __init__(self, bot_config):
        # Per-plugin storage settings; empty dict when STORAGE_CONFIG is absent.
        self._storage_config = getattr(bot_config, 'STORAGE_CONFIG', {})

    @abstractmethod
    def open(self, namespace: str) -> StorageBase:
        """
        Open the storage with the given namespace (core, or plugin name) and config.
        The caller of open will protect against double opens.

        :param namespace: a namespace to isolate the plugin storages.
        :return: an opened StorageBase implementation for that namespace.
        """
        pass
| gpl-3.0 |
stansonhealth/ansible-modules-core | packaging/os/package.py | 36 | 2098 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
maintainers:
- Ansible Core Team
short_description: Generic OS package manager
description:
- Installs, upgrade and removes packages using the underlying OS package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
'''
EXAMPLES = '''
- name: install the latest version of ntpdate
package: name=ntpdate state=latest
# This uses a variable as this changes per distribution.
- name: remove the apache package
package: name={{apache}} state=absent
'''
| gpl-3.0 |
born2net/componentSample | node_modules/browser-sync/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the character for codePoint, using a surrogate pair beyond
    the BMP (Python 2 narrow-build friendly); 'Error' if out of range."""
    # http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
    # http://stackoverflow.com/a/13436167/96656
    if 0x0000 <= codePoint <= 0xFFFF:
        return unichr(codePoint)
    if 0x010000 <= codePoint <= 0x10FFFF:
        offset = codePoint - 0x10000
        highSurrogate = 0xD800 + int(offset / 0x400)
        lowSurrogate = 0xDC00 + int(offset % 0x400)
        return unichr(highSurrogate) + unichr(lowSurrogate)
    return 'Error'
def hexify(codePoint):
    """Format a code point as a zero-padded 'U+XXXXXX' string."""
    digits = hex(codePoint)[2:].upper().zfill(6)
    return 'U+{0}'.format(digits)
def writeFile(filename, contents):
    # Echo the file name so script progress is visible (Python 2 print).
    print filename
    with open(filename, 'w') as f:
        # Normalize to exactly one trailing newline.
        f.write(contents.strip() + '\n')
# Build one record per Unicode code point with its decoded character and
# its UTF-8 byte sequence (as a one-char-per-byte latin1 string).
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    # encode('utf8').decode('latin1') exposes the raw UTF-8 bytes so json
    # can escape them literally.  NOTE: ``bytes`` shadows the builtin here.
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
# NOTE(review): the comment says "double spaces" but the replace() argument
# below appears to be a single space — confirm it was not mangled; a
# single-space replace would also alter the ': ' separators.
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| mit |
noba3/KoTos | addons/plugin.video.tvondesizonexl/xoze/snapvideo/Movshare.py | 3 | 1782 | '''
Created on Dec 22, 2011
@author: ajju
'''
from xoze.snapvideo import VideoHost, Video, STREAM_QUAL_SD
from xoze.utils import http
import re
def getVideoHost():
video_host = VideoHost()
video_host.set_icon('http://www.movshare.net/images/logo.png')
video_host.set_name('Movshare')
return video_host
def retrieveVideoInfo(video_id):
video = Video()
video.set_video_host(getVideoHost())
video.set_id(video_id)
try:
http.HttpClient().enable_cookies()
video_info_link = 'http://www.movshare.net/video/' + str(video_id)
html = http.HttpClient().get_html_content(url=video_info_link)
if re.search(r'Video hosting is expensive. We need you to prove you\'re human.', html):
html = http.HttpClient().get_html_content(url=video_info_link)
video_info_link = re.compile('<embed type="video/divx" src="(.+?)"').findall(html)
video_link = ''
if len(video_info_link) == 0:
domainStr = re.compile('flashvars.domain="(.+?)"').findall(html)[0]
fileStr = re.compile('flashvars.file="(.+?)"').findall(html)[0]
filekeyStr = re.compile('flashvars.filekey="(.+?)"').findall(html)[0]
video_info_link = domainStr + '/api/player.api.php?user=undefined&pass=undefined&codes=1&file=' + fileStr + '&key=' + filekeyStr
html = http.HttpClient().get_html_content(url=video_info_link)
video_link = re.compile(r'url=(.+?)&').findall(html)[0]
else:
video_link = video_info_link[0]
http.HttpClient().disable_cookies()
video.set_stopped(False)
video.add_stream_link(STREAM_QUAL_SD, video_link)
except:
video.set_stopped(True)
return video
| gpl-2.0 |
azurewraith/django-storages | storages/backends/gs.py | 19 | 4060 | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # noqa
from django.core.exceptions import ImproperlyConfigured
from storages.backends.s3boto import S3BotoStorage, S3BotoStorageFile
from storages.utils import setting
try:
from boto.gs.connection import GSConnection, SubdomainCallingFormat
from boto.exception import GSResponseError
from boto.gs.key import Key as GSKey
except ImportError:
raise ImproperlyConfigured("Could not load Boto's Google Storage bindings.\n"
"See https://github.com/boto/boto")
class GSBotoStorageFile(S3BotoStorageFile):
    """File object for the Google Storage backend.

    Writes replace an in-memory buffer; nothing is sent to the bucket
    until the file is closed.
    """

    def write(self, content):
        """Replace the buffered contents with *content* (write mode only)."""
        if 'w' not in self._mode:
            raise AttributeError("File was not opened in write mode.")
        self._is_dirty = True
        self.file = StringIO(content)

    def close(self):
        """Flush any buffered write to the bucket, then close the key."""
        if not self._is_dirty:
            self.key.close()
            return
        provider = self.key.bucket.connection.provider
        # The storage's configured headers take precedence over the
        # default ACL header.
        hdrs = {provider.acl_header: self._storage.default_acl}
        hdrs.update(self._storage.headers)
        self._storage._save_content(self.key, self.file, hdrs)
        self.key.close()
class GSBotoStorage(S3BotoStorage):
    """Google Cloud Storage backend implemented on top of S3BotoStorage.

    Only the connection/key/exception classes and the settings names
    differ from the S3 backend; the storage logic itself is inherited.
    """
    connection_class = GSConnection
    connection_response_error = GSResponseError
    file_class = GSBotoStorageFile
    key_class = GSKey

    # Names of the Django settings consulted for credentials.
    access_key_names = ['GS_ACCESS_KEY_ID']
    secret_key_names = ['GS_SECRET_ACCESS_KEY']

    access_key = setting('GS_ACCESS_KEY_ID')
    secret_key = setting('GS_SECRET_ACCESS_KEY')
    file_overwrite = setting('GS_FILE_OVERWRITE', True)
    headers = setting('GS_HEADERS', {})
    bucket_name = setting('GS_BUCKET_NAME', None)
    auto_create_bucket = setting('GS_AUTO_CREATE_BUCKET', False)
    default_acl = setting('GS_DEFAULT_ACL', 'public-read')
    bucket_acl = setting('GS_BUCKET_ACL', default_acl)
    querystring_auth = setting('GS_QUERYSTRING_AUTH', True)
    querystring_expire = setting('GS_QUERYSTRING_EXPIRE', 3600)
    # Durable Reduced Availability is a cheaper Google Storage class.
    durable_reduced_availability = setting('GS_DURABLE_REDUCED_AVAILABILITY', False)
    location = setting('GS_LOCATION', '')
    custom_domain = setting('GS_CUSTOM_DOMAIN')
    calling_format = setting('GS_CALLING_FORMAT', SubdomainCallingFormat())
    secure_urls = setting('GS_SECURE_URLS', True)
    file_name_charset = setting('GS_FILE_NAME_CHARSET', 'utf-8')
    is_gzipped = setting('GS_IS_GZIPPED', False)
    preload_metadata = setting('GS_PRELOAD_METADATA', False)
    gzip_content_types = setting('GS_GZIP_CONTENT_TYPES', (
        'text/css',
        'application/javascript',
        'application/x-javascript',
    ))
    url_protocol = setting('GS_URL_PROTOCOL', 'http:')

    def _save_content(self, key, content, headers):
        """Upload *content* to *key*, applying the backend's default ACL.

        ``rewind=True`` seeks *content* back to the start before upload.
        NOTE(review): ``self.encryption`` is presumably defined by
        S3BotoStorage configuration -- confirm it is meaningful for the
        Google Storage API.
        """
        # only pass backwards incompatible arguments if they vary from the default
        options = {}
        if self.encryption:
            options['encrypt_key'] = self.encryption
        key.set_contents_from_file(content, headers=headers,
                                   policy=self.default_acl,
                                   rewind=True, **options)

    def _get_or_create_bucket(self, name):
        """
        Retrieves a bucket if it exists, otherwise creates it.
        """
        # Choose the Google Storage class used only if the bucket has to
        # be created.
        if self.durable_reduced_availability:
            storage_class = 'DURABLE_REDUCED_AVAILABILITY'
        else:
            storage_class = 'STANDARD'
        try:
            return self.connection.get_bucket(name,
                validate=self.auto_create_bucket)
        except self.connection_response_error:
            if self.auto_create_bucket:
                bucket = self.connection.create_bucket(name, storage_class=storage_class)
                bucket.set_acl(self.bucket_acl)
                return bucket
            raise ImproperlyConfigured("Bucket %s does not exist. Buckets "
                                       "can be automatically created by "
                                       "setting GS_AUTO_CREATE_BUCKET to "
                                       "``True``." % name)
| bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/manifold/setup.py | 24 | 1279 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """Build the numpy.distutils configuration for sklearn.manifold.

    Declares the Cython-generated C extensions (``_utils`` and the
    BLAS-backed ``_barnes_hut_tsne``) plus the ``tests`` subpackage.
    """
    config = Configuration("manifold", parent_package, top_path)

    # libm is only needed (and available) on POSIX platforms.
    libraries = ['m'] if os.name == 'posix' else []

    config.add_extension("_utils",
                         sources=["_utils.c"],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries,
                         extra_compile_args=["-O3"])

    cblas_libs, blas_info = get_blas_info()
    compile_args = blas_info.pop('extra_compile_args', [])
    compile_args.append("-O4")
    config.add_extension("_barnes_hut_tsne",
                         libraries=cblas_libs,
                         sources=["_barnes_hut_tsne.c"],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=compile_args, **blas_info)

    config.add_subpackage('tests')

    return config
if __name__ == "__main__":
    # Allow this subpackage to be built standalone, e.g.
    # ``python setup.py build_ext --inplace``.
    from numpy.distutils.core import setup
    setup(**configuration().todict())
| bsd-3-clause |
Cojacfar/Maker | comm/lib/python2.7/site-packages/django/contrib/messages/tests/test_cookie.py | 37 | 6167 | import json
from django.contrib.messages import constants
from django.contrib.messages.tests.base import BaseTests
from django.contrib.messages.storage.cookie import (CookieStorage,
MessageEncoder, MessageDecoder)
from django.contrib.messages.storage.base import Message
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.safestring import SafeData, mark_safe
def set_cookie_data(storage, messages, invalid=False, encode_empty=False):
    """Install *messages* as encoded cookie data on ``storage.request``.

    The storage backend's cached ``_loaded_data`` is dropped so the next
    read decodes the cookie again.  With ``invalid=True`` the payload is
    corrupted (first character removed) so its hash no longer matches.
    """
    payload = storage._encode(messages, encode_empty=encode_empty)
    if invalid:
        payload = payload[1:]  # break the hash check
    storage.request.COOKIES = {CookieStorage.cookie_name: payload}
    try:
        del storage._loaded_data
    except AttributeError:
        pass
def stored_cookie_messages_count(storage, response):
    """Return the number of messages persisted in the response cookie.

    A missing cookie or one marked for deletion (max-age of 0) counts as
    zero, and a trailing "not finished" sentinel is excluded.
    """
    morsel = response.cookies.get(storage.cookie_name)
    if not morsel or morsel['max-age'] == 0:
        # Absent, or scheduled for deletion by the browser.
        return 0
    data = storage._decode(morsel.value)
    if not data:
        return 0
    count = len(data)
    if data[-1] == CookieStorage.not_finished:
        count -= 1
    return count
@override_settings(SESSION_COOKIE_DOMAIN='.example.com')
class CookieTest(BaseTests, TestCase):
    """Run the shared message-storage test suite against CookieStorage.

    BaseTests supplies the common cases; the methods below add
    cookie-specific behaviour: domain handling, tamper detection, size
    limits, JSON round-tripping and SafeData preservation.
    """
    storage_class = CookieStorage

    def stored_messages_count(self, storage, response):
        # Hook used by BaseTests to count persisted messages.
        return stored_cookie_messages_count(storage, response)

    def test_get(self):
        """Messages stored in the cookie are read back unchanged."""
        storage = self.storage_class(self.get_request())
        # Set initial data.
        example_messages = ['test', 'me']
        set_cookie_data(storage, example_messages)
        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), example_messages)

    def test_domain(self):
        """
        Ensure that CookieStorage honors SESSION_COOKIE_DOMAIN.
        Refs #15618.
        """
        # Test before the messages have been consumed
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'test')
        storage.update(response)
        self.assertTrue('test' in response.cookies['messages'].value)
        self.assertEqual(response.cookies['messages']['domain'], '.example.com')
        self.assertEqual(response.cookies['messages']['expires'], '')

        # Test after the messages have been consumed
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'test')
        for m in storage:
            pass # Iterate through the storage to simulate consumption of messages.
        storage.update(response)
        # Once consumed, the cookie is emptied and marked for expiry.
        self.assertEqual(response.cookies['messages'].value, '')
        self.assertEqual(response.cookies['messages']['domain'], '.example.com')
        self.assertEqual(response.cookies['messages']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')

    def test_get_bad_cookie(self):
        """A tampered cookie (invalid hash) must yield no messages."""
        request = self.get_request()
        storage = self.storage_class(request)
        # Set initial (invalid) data.
        example_messages = ['test', 'me']
        set_cookie_data(storage, example_messages, invalid=True)
        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), [])

    def test_max_cookie_length(self):
        """
        Tests that, if the data exceeds what is allowed in a cookie, older
        messages are removed before saving (and returned by the ``update``
        method).
        """
        storage = self.get_storage()
        response = self.get_response()

        # When storing as a cookie, the cookie has constant overhead of approx
        # 54 chars, and each message has a constant overhead of about 37 chars
        # and a variable overhead of zero in the best case. We aim for a message
        # size which will fit 4 messages into the cookie, but not 5.
        # See also FallbackTest.test_session_fallback
        msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
        for i in range(5):
            storage.add(constants.INFO, str(i) * msg_size)
        unstored_messages = storage.update(response)

        cookie_storing = self.stored_messages_count(storage, response)
        self.assertEqual(cookie_storing, 4)

        self.assertEqual(len(unstored_messages), 1)
        # The oldest message ('0' * msg_size) is the one pushed out.
        self.assertTrue(unstored_messages[0].message == '0' * msg_size)

    def test_json_encoder_decoder(self):
        """
        Tests that a complex nested data structure containing Message
        instances is properly encoded/decoded by the custom JSON
        encoder/decoder classes.
        """
        messages = [
            {
                'message': Message(constants.INFO, 'Test message'),
                'message_list': [Message(constants.INFO, 'message %s') \
                                 for x in range(5)] + [{'another-message': \
                                 Message(constants.ERROR, 'error')}],
            },
            Message(constants.INFO, 'message %s'),
        ]
        encoder = MessageEncoder(separators=(',', ':'))
        value = encoder.encode(messages)
        decoded_messages = json.loads(value, cls=MessageDecoder)
        self.assertEqual(messages, decoded_messages)

    def test_safedata(self):
        """
        Tests that a message containing SafeData is keeping its safe status when
        retrieved from the message storage.
        """
        def encode_decode(data):
            # Round-trip *data* through the cookie encode/decode cycle.
            message = Message(constants.DEBUG, data)
            encoded = storage._encode(message)
            decoded = storage._decode(encoded)
            return decoded.message

        storage = self.get_storage()
        self.assertIsInstance(
            encode_decode(mark_safe("<b>Hello Django!</b>")), SafeData)
        self.assertNotIsInstance(
            encode_decode("<b>Hello Django!</b>"), SafeData)
| gpl-2.0 |
jarus/django-registration | setup.py | 32 | 1911 | from distutils.core import setup
import os
from registration import get_version
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
    os.chdir(root_dir)

for dirpath, dirnames, filenames in os.walk('registration'):
    # Prune hidden directories in place so os.walk does not descend into
    # them.  (The previous ``del dirnames[i]`` inside an enumerate loop
    # skipped the entry following each deletion, so consecutive hidden
    # directories could survive the prune.)
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if '__init__.py' in filenames:
        # A package directory: record its dotted module path.
        pkg = dirpath.replace(os.path.sep, '.')
        if os.path.altsep:
            pkg = pkg.replace(os.path.altsep, '.')
        packages.append(pkg)
    elif filenames:
        # A data directory: record files relative to the package root.
        prefix = dirpath[13:] # Strip "registration/" or "registration\"
        for f in filenames:
            data_files.append(os.path.join(prefix, f))
# Register the package with distutils.  The version string comes from the
# package itself, with spaces replaced to form a legal version identifier.
setup(name='django-registration',
      version=get_version().replace(' ', '-'),
      description='An extensible user-registration application for Django',
      author='James Bennett',
      author_email='james@b-list.org',
      url='http://www.bitbucket.org/ubernostrum/django-registration/',
      download_url='https://bitbucket.org/ubernostrum/django-registration/downloads/django-registration-1.0.tar.gz',
      package_dir={'registration': 'registration'},
      packages=packages,
      package_data={'registration': data_files},
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Environment :: Web Environment',
                   'Framework :: Django',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: Utilities'],
      )
| bsd-3-clause |
nwjs/chromium.src | third_party/mako/test/test_tgplugin.py | 13 | 1544 | from mako import compat
from mako.ext.turbogears import TGPlugin
from test import template_base
from test import TemplateTest
from test.util import result_lines
tl = TGPlugin(options=dict(directories=[template_base]), extension="html")
class TestTGPlugin(TemplateTest):
    """Exercise the TurboGears plugin's template lookup and rendering."""

    def test_basic(self):
        template = tl.load_template("/index.html")
        assert result_lines(template.render()) == ["this is index"]

    def test_subdir(self):
        template = tl.load_template("/subdir/index.html")
        expected = ["this is sub index", "this is include 2"]
        assert result_lines(template.render()) == expected
        # The module id is derived from the normalized template path.
        assert tl.load_template("/subdir/index.html").module_id == "_subdir_index_html"

    def test_basic_dot(self):
        template = tl.load_template("index")
        assert result_lines(template.render()) == ["this is index"]

    def test_subdir_dot(self):
        template = tl.load_template("subdir.index")
        expected = ["this is sub index", "this is include 2"]
        assert result_lines(template.render()) == expected
        assert tl.load_template("subdir.index").module_id == "_subdir_index_html"

    def test_string(self):
        # A second positional argument is treated as inline template text.
        template = tl.load_template("foo", "hello world")
        assert template.render() == "hello world"

    def test_render(self):
        rendered = tl.render({}, template="/index.html")
        assert result_lines(rendered) == ["this is index"]
        rendered_u = tl.render({}, template=compat.u("/index.html"))
        assert result_lines(rendered_u) == ["this is index"]
| bsd-3-clause |
slabanja/ase | ase/cluster/factory.py | 1 | 6685 | import numpy as np
from ase import Atoms
from ase.data import atomic_numbers
from ase.lattice.spacegroup import Spacegroup
from ase.cluster.base import ClusterBase
from ase.cluster.cluster import Cluster
class ClusterFactory(ClusterBase):
    """Factory that carves finite clusters out of a bulk crystal lattice.

    Subclasses supply the crystal structure (spacegroup, lattice basis,
    atomic basis, xtal_name and the set_lattice_constant/set_basis
    hooks); calling an instance builds a Cluster bounded by the given
    Miller surfaces at the requested number of layers.
    """
    # Cartesian directions used when fitting the bounding cell.
    directions = [[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]]

    # Fractional positions of the atoms within one unit cell.
    atomic_basis = np.array([[0., 0., 0.]])

    def __call__(self, symbol, surfaces, layers, latticeconstant=None,
                 center=None, vacuum=0.0, debug=0):
        """Create and return a Cluster.

        symbol: chemical symbol (str) or atomic number (int).
        surfaces: sequence of Miller indices bounding the cluster.
        layers: layers to keep along each corresponding surface normal.
        latticeconstant: override the element's default lattice constant.
        center: fractional offset of the cluster center in the unit cell.
        vacuum: vacuum padding added around the cluster.
        debug: verbosity level for diagnostic printing.
        """
        self.debug = debug

        # Interpret symbol
        if isinstance(symbol, str):
            self.atomic_number = atomic_numbers[symbol]
        else:
            self.atomic_number = symbol

        self.set_lattice_constant(latticeconstant)
        self.set_basis()

        if self.debug:
            print "Lattice constant(s):", self.lattice_constant
            print "Lattice basis:\n", self.lattice_basis
            print "Resiprocal basis:\n", self.resiproc_basis
            print "Atomic basis:\n", self.atomic_basis

        self.set_surfaces_layers(surfaces, layers)
        self.set_lattice_size(center)

        if self.debug:
            print "Center position:", self.center.round(2)
            print "Base lattice size:", self.size

        cluster = self.make_cluster(vacuum)
        # Attach the construction metadata to the cluster for later use.
        cluster.symmetry = self.xtal_name
        cluster.center = self.center.copy()
        cluster.surfaces = self.surfaces.copy()
        cluster.lattice_basis = self.lattice_basis.copy()
        cluster.atomic_basis = self.atomic_basis.copy()
        cluster.resiproc_basis = self.resiproc_basis.copy()
        return cluster

    def make_cluster(self, vacuum):
        """Generate the cluster atoms, cut by the surfaces, and fit a cell."""
        # Make the base crystal by repeating the unit cell
        size = np.array(self.size)
        translations = np.zeros((size.prod(), 3))
        for h in range(size[0]):
            for k in range(size[1]):
                for l in range(size[2]):
                    i = h * (size[1] * size[2]) + k * size[2] + l
                    translations[i] = np.dot([h, k, l], self.lattice_basis)

        atomic_basis = np.dot(self.atomic_basis, self.lattice_basis)
        positions = np.zeros((len(translations) * len(atomic_basis), 3))
        n = len(atomic_basis)
        for i, trans in enumerate(translations):
            positions[n*i:n*(i+1)] = atomic_basis + trans

        # Remove all atoms that is outside the defined surfaces
        for s, l in zip(self.surfaces, self.layers):
            n = self.miller_to_direction(s)
            # Keep atoms within l layers (plus a small tolerance) of the
            # center, measured along this surface normal.
            rmax = self.get_layer_distance(s, l + 0.1)

            r = np.dot(positions - self.center, n)
            mask = np.less(r, rmax)

            if self.debug > 1:
                print "Cutting %s at %i layers ~ %.3f A" % (s, l, rmax)

            positions = positions[mask]

        # Fit the cell, so it only just consist the atoms
        # NOTE: 'min'/'max' shadow the builtins within this method.
        min = np.zeros(3)
        max = np.zeros(3)
        for i in range(3):
            v = self.directions[i]
            r = np.dot(positions, v)
            min[i] = r.min()
            max[i] = r.max()

        cell = max - min + vacuum
        positions = positions - min + vacuum / 2.0
        self.center = self.center - min + vacuum / 2.0

        return Cluster(symbols=[self.atomic_number] * len(positions),
                       positions=positions, cell=cell)

    def set_lattice_size(self, center):
        """Compute self.size (unit-cell repetitions) and self.center.

        center: fractional offset within the unit cell, or None for the
            origin.  Raises ValueError if any component is outside [0, 1].
        """
        if center is None:
            offset = np.zeros(3)
        else:
            offset = np.array(center)
            if (offset > 1.0).any() or (offset < 0.0).any():
                raise ValueError("Center offset must lie within the lattice unit \
cell.")

        max = np.ones(3)
        min = -np.ones(3)
        # Transform surface extents into lattice coordinates.
        v = np.linalg.inv(self.lattice_basis.T)
        for s, l in zip(self.surfaces, self.layers):
            n = self.miller_to_direction(s) * self.get_layer_distance(s, l)
            k = np.round(np.dot(v, n), 2)
            for i in range(3):
                # Round the lattice coordinates away from zero so the
                # repeated cell fully contains the requested layers.
                if k[i] > 0.0:
                    k[i] = np.ceil(k[i])
                elif k[i] < 0.0:
                    k[i] = np.floor(k[i])

            if self.debug > 1:
                print "Spaning %i layers in %s in lattice basis ~ %s" % (l, s, k)

            max[k > max] = k[k > max]
            min[k < min] = k[k < min]

        self.center = np.dot(offset - min, self.lattice_basis)
        self.size = (max - min + np.ones(3)).astype(int)

    def set_surfaces_layers(self, surfaces, layers):
        """Expand the surface list with all symmetry-equivalent surfaces.

        Each input surface is first reduced to its lowest Miller index;
        equivalent reflections from the spacegroup inherit the same layer
        count.
        """
        if len(surfaces) != len(layers):
            raise ValueError("Improper size of surface and layer arrays: %i != %i"
                             % (len(surfaces), len(layers)))

        sg = Spacegroup(self.spacegroup)
        surfaces = np.array(surfaces)
        layers = np.array(layers)

        for i, s in enumerate(surfaces):
            s = reduce_miller(s)
            surfaces[i] = s

        surfaces_full = surfaces.copy()
        layers_full = layers.copy()

        for s, l in zip(surfaces, layers):
            equivalent_surfaces = sg.equivalent_reflections(s.reshape(-1, 3))

            for es in equivalent_surfaces:
                # If the equivalent surface (es) is not in the surface list,
                # then append it.
                if not np.equal(es, surfaces_full).all(axis=1).any():
                    surfaces_full = np.append(surfaces_full, es.reshape(1, 3), axis=0)
                    layers_full = np.append(layers_full, l)

        self.surfaces = surfaces_full.copy()
        self.layers = layers_full.copy()

    def get_resiproc_basis(self, basis):
        """Returns the resiprocal basis to a given lattice (crystal) basis"""
        k = 1 / np.dot(basis[0], cross(basis[1], basis[2]))

        # The same as the inversed basis matrix transposed
        return k * np.array([cross(basis[1], basis[2]),
                             cross(basis[2], basis[0]),
                             cross(basis[0], basis[1])])
# Helping functions
def cross(a, b):
    """Return the cross product a x b as a numpy array."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return np.array([ay * bz - az * by,
                     az * bx - ax * bz,
                     ax * by - ay * bx])
def GCD(a, b):
    """Greatest Common Divisor of a and b (recursive Euclid)."""
    if a == 0:
        return b
    return GCD(b % a, a)
def reduce_miller(hkl):
    """Reduce Miller index to the lowest equivalent integers.

    The returned index keeps the orientation of the input (its dot
    product with the original vector is positive).
    """
    hkl = np.array(hkl)
    old = hkl.copy()

    d = GCD(GCD(hkl[0], hkl[1]), hkl[2])
    while d != 1:
        # Use floor division to stay in integer arithmetic: identical to
        # ``/`` for Python 2 ints (this file's dialect), but avoids the
        # float promotion true division would cause under Python 3 /
        # newer numpy, which would turn the Miller index into floats.
        hkl = hkl // d
        d = GCD(GCD(hkl[0], hkl[1]), hkl[2])

    if np.dot(old, hkl) > 0:
        return hkl
    else:
        return -hkl
| gpl-2.0 |
pllim/ginga | ginga/rv/plugins/Preferences.py | 1 | 63607 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Make changes to channel settings graphically in the UI.
**Plugin Type: Local**
``Preferences`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
The ``Preferences`` plugin sets the preferences on a per-channel basis.
The preferences for a given channel are inherited from the "Image"
channel until they are explicitly set and saved using this plugin.
If "Save Settings" is pressed, it will save the settings to the user's
home Ginga folder so that when a channel with the same name is created
in future Ginga sessions it will obtain the same settings.
**Color Distribution Preferences**
.. figure:: figures/cdist-prefs.png
:align: center
:alt: Color Distribution preferences
"Color Distribution" preferences.
The "Color Distribution" preferences control the preferences used for the
data value to color index conversion that occurs after cut levels are
applied and just before final color mapping is performed. It concerns
how the values between the low and high cut levels are distributed to
the color and intensity mapping phase.
The "Algorithm" control is used to set the algorithm used for the
mapping. Click the control to show the list, or simply scroll the mouse
wheel while hovering the cursor over the control. There are eight
algorithms available: linear, log, power, sqrt, squared, asinh, sinh,
and histeq. The name of each algorithm is indicative of how
the data is mapped to the colors in the color map. "linear" is the
default.
**Color Mapping Preferences**
.. figure:: figures/cmap-prefs.png
:align: center
:alt: Color Mapping preferences
"Color Mapping" preferences.
The "Color Mapping" preferences control the preferences used for the
color map and intensity map, used during the final phase of the color
mapping process. Together with the "Color Distribution" preferences, these
control the mapping of data values into a 24-bpp RGB visual representation.
The "Colormap" control selects which color map should be loaded and
used. Click the control to show the list, or simply scroll the mouse
wheel while hovering the cursor over the control.
The "Intensity" control selects which intensity map should be used
with the color map. The intensity map is applied just before the color
map, and can be used to change the standard linear scale of values into
an inverted scale, logarithmic, etc.
Ginga comes with a good selection of color maps, but should you want
more, you can add custom ones or, if ``matplotlib`` is installed, you
can load all the ones that it has.
See "Customizing Ginga" for details.
**Zoom Preferences**
.. figure:: figures/zoom-prefs.png
:align: center
:alt: Zoom preferences
"Zoom" preferences.
The "Zoom" preferences control Ginga's zooming/scaling behavior.
Ginga supports two zoom algorithms, chosen using the "Zoom Alg" control:
* The "step" algorithm zooms the image inwards in discrete
steps of 1X, 2X, 3X, etc. or outwards in steps of 1/2X, 1/3X, 1/4X,
etc. This algorithm results in the least artifacts visually, but is a
bit slower to zoom over wide ranges when using a scrolling motion
because more "throw" is required to achieve a large zoom change
(this is not the case if one uses of the shortcut zoom keys, such as
the digit keys).
* The "rate" algorithm zooms the image by advancing the scaling at
a rate defined by the value in the "Zoom Rate" box. This rate defaults
to the square root of 2. Larger numbers cause larger changes in scale
between zoom levels. If you like to zoom your images rapidly, at a
small cost in image quality, you would likely want to choose this
option.
Note that regardless of which method is chosen for the zoom algorithm,
the zoom can be controlled by holding down ``Ctrl`` (coarse) or ``Shift``
(fine) while scrolling to constrain the zoom rate (assuming the default
mouse bindings).
The "Stretch XY" control can be used to stretch one of the axes (X or
Y) relative to the other. Select an axis with this control and roll the
scroll wheel while hovering over the "Stretch Factor" control to
stretch the pixels in the selected axis.
The "Scale X" and "Scale Y" controls offer direct access to the
underlying scaling, bypassing the discrete zoom steps. Here, exact
values can be typed to scale the image. Conversely, you will see these
values change as the image is zoomed.
The "Scale Min" and "Scale Max" controls can be used to place a
limit on how much the image can be scaled.
The "Zoom Defaults" button will restore the controls to the Ginga
default values.
**Pan Preferences**
.. figure:: figures/pan-prefs.png
:align: center
:alt: Pan Preferences
"Pan" preferences.
The "Pan" preferences control Ginga's panning behavior.
The "Pan X" and "Pan Y" controls offer direct access to set the pan
position in the image (the part of the image located at the center of
the window) -- you can see them change as you pan around the image.
The "Center Image" button sets the pan position to the center of the
image, as calculated by halving the dimensions in X and Y.
The "Mark Center" check box, when checked, will cause Ginga to draw a
small reticle in the center of the image. This is useful for knowing
the pan position and for debugging.
**Transform Preferences**
.. figure:: figures/transform-prefs.png
:align: center
:alt: Transform Preferences
"Transform" preferences.
The "Transform" preferences provide for transforming the view of the image
by flipping the view in X or Y, swapping the X and Y axes, or rotating
the image in arbitrary amounts.
The "Flip X" and "Flip Y" checkboxes cause the image view to be
flipped in the corresponding axis.
The "Swap XY" checkbox causes the image view to be altered by swapping
the X and Y axes. This can be combined with "Flip X" and "Flip Y" to rotate
the image in 90 degree increments. These views will render more quickly
than arbitrary rotations using the "Rotate" control.
The "Rotate" control will rotate the image view the specified amount.
The value should be specified in degrees. "Rotate" can be specified in
conjunction with flipping and swapping.
The "Restore" button will restore the view to the default view, which
is unflipped, unswapped, and unrotated.
**Auto Cuts Preferences**
.. figure:: figures/autocuts-prefs.png
:align: center
:alt: Auto Cuts Preferences
"Auto Cuts" preferences.
The "Auto Cuts" preferences control the calculation of cut levels for
the view when the auto cut levels button or key is pressed, or when
loading a new image with auto cuts enabled. You can also set the cut
levels manually from here.
The "Cut Low" and "Cut High" fields can be used to manually specify lower
and upper cut levels. Pressing "Cut Levels" will set the levels to these
values manually. If a value is missing, it is assumed to default to the
whatever the current value is.
Pressing "Auto Levels" will calculate the levels according to an algorithm.
The "Auto Method" control is used to choose which auto cuts algorithm
used: "minmax" (minimum maximum values), "median" (based on median
filtering), "histogram" (based on an image histogram), "stddev" (based on
the standard deviation of pixel values), or "zscale" (based on the ZSCALE
algorithm popularized by IRAF).
As the algorithm is changed, the boxes under it may also change to
allow changes to parameters particular to each algorithm.
**WCS Preferences**
.. figure:: figures/wcs-prefs.png
:align: center
:alt: WCS Preferences
"WCS" preferences.
The "WCS" preferences control the display preferences for the World
Coordinate System (WCS) calculations used to report the cursor position in the
image.
The "WCS Coords" control is used to select the coordinate system in
which to display the result.
The "WCS Display" control is used to select a sexagesimal (``H:M:S``)
readout or a decimal degrees readout.
**New Image Preferences**
.. figure:: figures/newimages-prefs.png
:align: center
:alt: New Image Preferences
"New Image" preferences.
The "New Images" preferences determine how Ginga reacts when a new image
is loaded into the channel. This includes when an older image is
revisited by clicking on its thumbnail in the ``Thumbs`` plugin pane.
The "Cut New" setting controls whether an automatic cut-level
calculation should be performed on the new image, or whether the
currently set cut levels should be applied. The possible settings are:
* "on": calculate a new cut levels always;
* "override": calculate a new cut levels until the user overrides
it by manually setting a cut levels, then turn "off"; or
* "off": always use the currently set cut levels.
.. tip:: The "override" setting is provided for the convenience of
having automatic cut levels, while preventing a manually set
cuts from being overridden when a new image is ingested. When
typed in the image window, the semicolon key can be used to
toggle the mode back to override (from "off"), while colon will
set the preference to "on". The ``Info`` panel shows
the state of this setting.
The "Zoom New" setting controls whether a newly visited image should
be zoomed to fit the window. There are three possible values: on,
override, and off:
* "on": the new image is always zoomed to fit;
* "override": images are automatically fitted until the zoom level is
changed manually, then the mode automatically changes to "off", or
* "off": always use the currently set zoom levels.
.. tip:: The "override" setting is provided for the convenience of
having an automatic zoom, while preventing a manually set zoom
level from being overridden when a new image is ingested. When
typed in the image window, the apostrophe (a.k.a. "single quote")
key can be used to toggle the mode back to "override" (from
"off"), while quote (a.k.a. double quote) will set the preference
to "on". The global plugin ``Info`` panel shows the state of this
setting.
The "Center New" box, if checked, will cause newly visited images to
always have the pan position reset to the center of the image. If
unchecked, the pan position is unchanged from the previous image.
The "Follow New" setting is used to control whether Ginga will change
the display if a new image is loaded into the channel. If unchecked,
the image is loaded (as seen, for example, by its appearance in the
``Thumbs`` tab), but the display will not change to the new image. This
setting is useful in cases where new images are being loaded by some
automated means into a channel and the user wishes to study the current
image without being interrupted.
The "Raise New" setting controls whether Ginga will raise the tab of a
channel when an image is loaded into that channel. If unchecked, then
Ginga will not raise the tab when an image is loaded into that
particular channel.
The "Create Thumbnail" setting controls whether Ginga will create a
thumbnail for images loaded into that channel. In cases where many
images are being loaded into a channel frequently (e.g., a low frequency
video feed), it may be undesirable to create thumbnails for all of them.
**General Preferences**
The "Num Images" setting specifies how many images can be retained in
buffers in this channel before being ejected. A value of zero (0) means
unlimited--images will never be ejected. If an image was loaded from
some accessible storage and it is ejected, it will automatically be
reloaded if the image is revisited by navigating the channel.
The "Sort Order" setting determines whether images are sorted in the
channel alphabetically by name or by the time when they were loaded.
This principally affects the order in which images are cycled when using
the up/down "arrow" keys or buttons, and not necessarily how they are
displayed in plugins like "Contents" or "Thumbs" (which generally have
their own setting preference for ordering).
The "Use scrollbars" check box controls whether the channel viewer will
show scroll bars around the edge of the viewer frame.
**Remember Preferences**
When an image is loaded, a profile is created and attached to the image
metadata in the channel. These profiles are continuously updated with
viewer state as the image is manipulated. The "Remember" preferences
control which parts of these profiles are restored to the viewer state
when the image is navigated to in the channel:
* "Restore Scale" will restore the zoom (scale) level
* "Restore Pan" will restore the pan position
* "Restore Transform" will restore any flip or swap axes transforms
* "Restore Rotation" will restore any rotation of the image
* "Restore Cuts" will restore any cut levels for the image
* "Restore Color" will restore any coloring adjustments made (including
  color map, color distribution, contrast/stretch, etc.)
"""
import math
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga import cmap, imap, trcalc
from ginga import GingaPlugin
from ginga import AutoCuts, ColorDist
from ginga.util import wcs, wcsmod, rgb_cms
__all_ = ['Preferences']
class Preferences(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Constructor.

        Parameters
        ----------
        fv : the reference viewer shell object
        fitsimage : the image viewer this (local) plugin is bound to
        """
        # superclass defines some variables for us, like logger
        super(Preferences, self).__init__(fv, fitsimage)

        # cache the available color/intensity map names; the comboboxes
        # built in build_gui() are populated from these lists
        self.cmap_names = cmap.get_names()
        self.imap_names = imap.get_names()
        self.zoomalg_names = ('step', 'rate')

        # get Preferences preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_Preferences')
        self.settings.add_defaults(orientation=None)
        self.settings.load(onError='silent')

        # settings of the viewer we are bound to; most callbacks below
        # write into this settings object
        self.t_ = self.fitsimage.get_settings()
        self.autocuts_cache = {}
        self.gui_up = False

        self.calg_names = ColorDist.get_dist_names()
        self.autozoom_options = self.fitsimage.get_autozoom_options()
        self.autocut_options = self.fitsimage.get_autocuts_options()
        self.autocut_methods = self.fitsimage.get_autocut_methods()
        self.autocenter_options = self.fitsimage.get_autocenter_options()
        self.pancoord_options = ('data', 'wcs')
        self.sort_options = ('loadtime', 'alpha')

        # register "_ext_cb" handlers so the GUI controls stay in sync
        # when these settings are changed outside this plugin
        for key in ['color_map', 'intensity_map',
                    'color_algorithm', 'color_hashsize']:
            self.t_.get_setting(key).add_callback(
                'set', self.rgbmap_changed_ext_cb)
        self.t_.get_setting('autozoom').add_callback(
            'set', self.autozoom_changed_ext_cb)
        self.t_.get_setting('autocenter').add_callback(
            'set', self.autocenter_changed_ext_cb)
        self.t_.get_setting('autocuts').add_callback(
            'set', self.autocuts_changed_ext_cb)
        for key in ['switchnew', 'raisenew', 'genthumb']:
            self.t_.get_setting(key).add_callback(
                'set', self.set_chprefs_ext_cb)
        for key in ['pan']:
            self.t_.get_setting(key).add_callback(
                'set', self.pan_changed_ext_cb)
        for key in ['scale']:
            self.t_.get_setting(key).add_callback(
                'set', self.scale_changed_ext_cb)
        self.t_.get_setting('zoom_algorithm').add_callback(
            'set', self.set_zoomalg_ext_cb)
        self.t_.get_setting('zoom_rate').add_callback(
            'set', self.set_zoomrate_ext_cb)
        for key in ['scale_x_base', 'scale_y_base']:
            self.t_.get_setting(key).add_callback(
                'set', self.scalebase_changed_ext_cb)
        self.t_.get_setting('rot_deg').add_callback(
            'set', self.set_rotate_ext_cb)
        for name in ('flip_x', 'flip_y', 'swap_xy'):
            self.t_.get_setting(name).add_callback(
                'set', self.set_transform_ext_cb)
        self.t_.get_setting('autocut_method').add_callback('set',
                                                           self.set_autocut_method_ext_cb)
        self.t_.get_setting('autocut_params').add_callback('set',
                                                           self.set_autocut_params_ext_cb)
        self.t_.get_setting('cuts').add_callback(
            'set', self.cutset_cb)

        self.t_.setdefault('wcs_coords', 'icrs')
        self.t_.setdefault('wcs_display', 'sexagesimal')

        # buffer len (number of images in memory)
        self.t_.add_defaults(numImages=4)
        self.t_.get_setting('numImages').add_callback('set', self.set_buflen_ext_cb)

        # preload images
        self.t_.add_defaults(preload_images=False)

        # ICC profile choices; None (first entry) means "no profile"
        self.icc_profiles = list(rgb_cms.get_profiles())
        self.icc_profiles.insert(0, None)
        self.icc_intents = rgb_cms.get_intents()
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container,
orientation=self.settings.get('orientation', None))
self.orientation = orientation
#vbox.set_border_width(4)
vbox.set_spacing(2)
# COLOR DISTRIBUTION OPTIONS
fr = Widgets.Frame("Color Distribution")
captions = (('Algorithm:', 'label', 'Algorithm', 'combobox'),
#('Table Size:', 'label', 'Table Size', 'entryset'),
('Dist Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.calg_choice = b.algorithm
#self.w.table_size = b.table_size
b.algorithm.set_tooltip("Choose a color distribution algorithm")
#b.table_size.set_tooltip("Set size of the distribution hash table")
b.dist_defaults.set_tooltip("Restore color distribution defaults")
b.dist_defaults.add_callback('activated',
lambda w: self.set_default_distmaps())
combobox = b.algorithm
options = []
index = 0
for name in self.calg_names:
options.append(name)
combobox.append_text(name)
index += 1
try:
index = self.calg_names.index(self.t_.get('color_algorithm',
"linear"))
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_calg_cb)
## entry = b.table_size
## entry.set_text(str(self.t_.get('color_hashsize', 65535)))
## entry.add_callback('activated', self.set_tablesize_cb)
fr.set_widget(w)
vbox.add_widget(fr)
# COLOR MAPPING OPTIONS
fr = Widgets.Frame("Color Mapping")
captions = (('Colormap:', 'label', 'Colormap', 'combobox'),
('Intensity:', 'label', 'Intensity', 'combobox'),
('Color Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.cmap_choice = b.colormap
self.w.imap_choice = b.intensity
b.color_defaults.add_callback('activated',
lambda w: self.set_default_cmaps())
b.colormap.set_tooltip("Choose a color map for this image")
b.intensity.set_tooltip("Choose an intensity map for this image")
b.color_defaults.set_tooltip("Restore default color and intensity maps")
fr.set_widget(w)
vbox.add_widget(fr)
combobox = b.colormap
options = []
index = 0
for name in self.cmap_names:
options.append(name)
combobox.append_text(name)
index += 1
cmap_name = self.t_.get('color_map', "gray")
try:
index = self.cmap_names.index(cmap_name)
except Exception:
index = self.cmap_names.index('gray')
combobox.set_index(index)
combobox.add_callback('activated', self.set_cmap_cb)
combobox = b.intensity
options = []
index = 0
for name in self.imap_names:
options.append(name)
combobox.append_text(name)
index += 1
imap_name = self.t_.get('intensity_map', "ramp")
try:
index = self.imap_names.index(imap_name)
except Exception:
index = self.imap_names.index('ramp')
combobox.set_index(index)
combobox.add_callback('activated', self.set_imap_cb)
# AUTOCUTS OPTIONS
fr = Widgets.Frame("Auto Cuts")
vbox2 = Widgets.VBox()
fr.set_widget(vbox2)
captions = (('Cut Low:', 'label', 'Cut Low Value', 'llabel',
'Cut Low', 'entry'),
('Cut High:', 'label', 'Cut High Value', 'llabel',
'Cut High', 'entry'),
('spacer_1', 'spacer', 'spacer_2', 'spacer',
'Cut Levels', 'button'),
('Auto Method:', 'label', 'Auto Method', 'combobox',
'Auto Levels', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
loval, hival = self.t_['cuts']
b.cut_levels.set_tooltip("Set cut levels manually")
b.auto_levels.set_tooltip("Set cut levels by algorithm")
b.cut_low.set_tooltip("Set low cut level (press Enter)")
b.cut_low.set_length(9)
b.cut_low_value.set_text('%.4g' % (loval))
b.cut_high.set_tooltip("Set high cut level (press Enter)")
b.cut_high.set_length(9)
b.cut_high_value.set_text('%.4g' % (hival))
b.cut_low.add_callback('activated', self.cut_levels)
b.cut_high.add_callback('activated', self.cut_levels)
b.cut_levels.add_callback('activated', self.cut_levels)
b.auto_levels.add_callback('activated', self.auto_levels)
# Setup auto cuts method choice
combobox = b.auto_method
index = 0
method = self.t_.get('autocut_method', "histogram")
for name in self.autocut_methods:
combobox.append_text(name)
index += 1
try:
index = self.autocut_methods.index(method)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_autocut_method_cb)
b.auto_method.set_tooltip("Choose algorithm for auto levels")
vbox2.add_widget(w, stretch=0)
self.w.acvbox = Widgets.VBox()
vbox2.add_widget(self.w.acvbox, stretch=1)
vbox.add_widget(fr, stretch=0)
# TRANSFORM OPTIONS
fr = Widgets.Frame("Transform")
captions = (('Flip X', 'checkbutton', 'Flip Y', 'checkbutton',
'Swap XY', 'checkbutton'),
('Rotate:', 'label', 'Rotate', 'spinfloat'),
('Restore', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
for name in ('flip_x', 'flip_y', 'swap_xy'):
btn = b[name]
btn.set_state(self.t_.get(name, False))
btn.add_callback('activated', self.set_transforms_cb)
b.flip_x.set_tooltip("Flip the image around the X axis")
b.flip_y.set_tooltip("Flip the image around the Y axis")
b.swap_xy.set_tooltip("Swap the X and Y axes in the image")
b.rotate.set_tooltip("Rotate the image around the pan position")
b.restore.set_tooltip("Clear any transforms and center image")
b.restore.add_callback('activated', self.restore_cb)
b.rotate.set_limits(0.00, 359.99999999, incr_value=10.0)
b.rotate.set_value(0.00)
b.rotate.set_decimals(8)
b.rotate.add_callback('value-changed', self.rotate_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# WCS OPTIONS
fr = Widgets.Frame("WCS")
captions = (('WCS Coords:', 'label', 'WCS Coords', 'combobox'),
('WCS Display:', 'label', 'WCS Display', 'combobox'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.wcs_coords.set_tooltip("Set WCS coordinate system")
b.wcs_display.set_tooltip("Set WCS display format")
# Setup WCS coords method choice
combobox = b.wcs_coords
index = 0
for name in wcsmod.coord_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_coords', "")
try:
index = wcsmod.coord_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
# Setup WCS display format method choice
combobox = b.wcs_display
index = 0
for name in wcsmod.display_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# ZOOM OPTIONS
fr = Widgets.Frame("Zoom")
captions = (('Zoom Alg:', 'label', 'Zoom Alg', 'combobox'),
('Zoom Rate:', 'label', 'Zoom Rate', 'spinfloat'),
('Stretch XY:', 'label', 'Stretch XY', 'combobox'),
('Stretch Factor:', 'label', 'Stretch Factor', 'spinfloat'),
('Scale X:', 'label', 'Scale X', 'entryset'),
('Scale Y:', 'label', 'Scale Y', 'entryset'),
('Scale Min:', 'label', 'Scale Min', 'entryset'),
('Scale Max:', 'label', 'Scale Max', 'entryset'),
('Interpolation:', 'label', 'Interpolation', 'combobox'),
('Zoom Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
index = 0
for name in self.zoomalg_names:
b.zoom_alg.append_text(name.capitalize())
index += 1
zoomalg = self.t_.get('zoom_algorithm', "step")
try:
index = self.zoomalg_names.index(zoomalg)
b.zoom_alg.set_index(index)
except Exception:
pass
b.zoom_alg.set_tooltip("Choose Zoom algorithm")
b.zoom_alg.add_callback('activated', self.set_zoomalg_cb)
index = 0
for name in ('X', 'Y'):
b.stretch_xy.append_text(name)
index += 1
b.stretch_xy.set_index(0)
b.stretch_xy.set_tooltip("Stretch pixels in X or Y")
b.stretch_xy.add_callback('activated', self.set_stretch_cb)
b.stretch_factor.set_limits(1.0, 10.0, incr_value=0.10)
b.stretch_factor.set_value(1.0)
b.stretch_factor.set_decimals(8)
b.stretch_factor.add_callback('value-changed', self.set_stretch_cb)
b.stretch_factor.set_tooltip("Length of pixel relative to 1 on other side")
b.stretch_factor.set_enabled(zoomalg != 'step')
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
b.zoom_rate.set_limits(1.01, 10.0, incr_value=0.1)
b.zoom_rate.set_value(zoomrate)
b.zoom_rate.set_decimals(8)
b.zoom_rate.set_enabled(zoomalg != 'step')
b.zoom_rate.set_tooltip("Step rate of increase/decrease per zoom level")
b.zoom_rate.add_callback('value-changed', self.set_zoomrate_cb)
b.zoom_defaults.add_callback('activated', self.set_zoom_defaults_cb)
scale_x, scale_y = self.fitsimage.get_scale_xy()
b.scale_x.set_tooltip("Set the scale in X axis")
b.scale_x.set_text(str(scale_x))
b.scale_x.add_callback('activated', self.set_scale_cb)
b.scale_y.set_tooltip("Set the scale in Y axis")
b.scale_y.set_text(str(scale_y))
b.scale_y.add_callback('activated', self.set_scale_cb)
scale_min, scale_max = self.t_['scale_min'], self.t_['scale_max']
b.scale_min.set_text(str(scale_min))
b.scale_min.add_callback('activated', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the minimum allowed scale in any axis")
b.scale_max.set_text(str(scale_max))
b.scale_max.add_callback('activated', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the maximum allowed scale in any axis")
index = 0
for name in trcalc.interpolation_methods:
b.interpolation.append_text(name)
index += 1
interp = self.t_.get('interpolation', "basic")
try:
index = trcalc.interpolation_methods.index(interp)
except ValueError:
# previous choice might not be available if preferences
# were saved when opencv was being used--if so, default
# to "basic"
index = trcalc.interpolation_methods.index('basic')
b.interpolation.set_index(index)
b.interpolation.set_tooltip("Choose interpolation method")
b.interpolation.add_callback('activated', self.set_interp_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# PAN OPTIONS
fr = Widgets.Frame("Panning")
captions = (('Pan X:', 'label', 'Pan X', 'entry',
'WCS sexagesimal', 'checkbutton'),
('Pan Y:', 'label', 'Pan Y', 'entry',
'Apply Pan', 'button'),
('Pan Coord:', 'label', 'Pan Coord', 'combobox'),
('Center Image', 'button', 'Mark Center', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
pan_x, pan_y = self.fitsimage.get_pan()
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_coord = self.t_.get('pan_coord', "data")
if pan_coord == 'data':
pan_x, pan_y = pan_x + coord_offset, pan_y + coord_offset
b.pan_x.set_tooltip("Coordinate for the pan position in X axis")
b.pan_x.set_text(str(pan_x))
#b.pan_x.add_callback('activated', self.set_pan_cb)
b.pan_y.set_tooltip("Coordinate for the pan position in Y axis")
b.pan_y.set_text(str(pan_y))
#b.pan_y.add_callback('activated', self.set_pan_cb)
b.apply_pan.add_callback('activated', self.set_pan_cb)
b.apply_pan.set_tooltip("Set the pan position")
b.wcs_sexagesimal.set_tooltip("Display pan position in sexagesimal")
b.wcs_sexagesimal.add_callback('activated',
lambda w, tf: self._update_pan_coords())
index = 0
for name in self.pancoord_options:
b.pan_coord.append_text(name)
index += 1
index = self.pancoord_options.index(pan_coord)
b.pan_coord.set_index(index)
b.pan_coord.set_tooltip("Pan coordinates type")
b.pan_coord.add_callback('activated', self.set_pan_coord_cb)
b.center_image.set_tooltip("Set the pan position to center of the image")
b.center_image.add_callback('activated', self.center_image_cb)
b.mark_center.set_tooltip("Mark the center (pan locator)")
b.mark_center.add_callback('activated', self.set_misc_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("New Images")
captions = (('Cut New:', 'label', 'Cut New', 'combobox'),
('Zoom New:', 'label', 'Zoom New', 'combobox'),
('Center New:', 'label', 'Center New', 'combobox'),
('Follow New', 'checkbutton', 'Raise New', 'checkbutton'),
('Create thumbnail', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
combobox = b.cut_new
index = 0
for name in self.autocut_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocuts', "off")
index = self.autocut_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocuts_cb)
b.cut_new.set_tooltip("Automatically set cut levels for new images")
combobox = b.zoom_new
index = 0
for name in self.autozoom_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autozoom', "off")
index = self.autozoom_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autozoom_cb)
b.zoom_new.set_tooltip("Automatically fit new images to window")
combobox = b.center_new
index = 0
for name in self.autocenter_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocenter', "off")
# Hack to convert old values that used to be T/F
if isinstance(option, bool):
choice = {True: 'on', False: 'off'}
option = choice[option]
index = self.autocenter_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocenter_cb)
b.center_new.set_tooltip("Automatically center new images in window")
b.follow_new.set_tooltip("View new images as they arrive")
b.raise_new.set_tooltip("Raise and focus tab for new images")
b.create_thumbnail.set_tooltip("Create thumbnail for new images")
self.w.follow_new.set_state(True)
self.w.follow_new.add_callback('activated', self.set_chprefs_cb)
self.w.raise_new.set_state(True)
self.w.raise_new.add_callback('activated', self.set_chprefs_cb)
self.w.create_thumbnail.set_state(True)
self.w.create_thumbnail.add_callback('activated', self.set_chprefs_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
exp = Widgets.Expander("General")
captions = (('Num Images:', 'label', 'Num Images', 'entryset'),
('Sort Order:', 'label', 'Sort Order', 'combobox'),
('Use scrollbars', 'checkbutton',
'Preload Images', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.num_images.set_tooltip(
"Maximum number of in memory images in channel (0==unlimited)")
num_images = self.t_.get('numImages', 0)
self.w.num_images.set_text(str(num_images))
self.w.num_images.add_callback('activated', self.set_buffer_cb)
combobox = b.sort_order
index = 0
for name in self.sort_options:
combobox.append_text(name)
index += 1
option = self.t_.get('sort_order', 'loadtime')
index = self.sort_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_sort_cb)
b.sort_order.set_tooltip("Sort order for images in channel")
scrollbars = self.t_.get('scrollbars', 'off')
self.w.use_scrollbars.set_state(scrollbars in ['on', 'auto'])
self.w.use_scrollbars.add_callback('activated', self.set_scrollbars_cb)
b.use_scrollbars.set_tooltip("Use scrollbars around viewer")
preload_images = self.t_.get('preload_images', False)
self.w.preload_images.set_state(preload_images)
self.w.preload_images.add_callback('activated', self.set_preload_cb)
b.preload_images.set_tooltip(
"Preload adjacent images to speed up access")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
exp = Widgets.Expander("Remember")
captions = (('Restore Scale', 'checkbutton',
'Restore Pan', 'checkbutton'),
('Restore Transform', 'checkbutton',
'Restore Rotation', 'checkbutton'),
('Restore Cuts', 'checkbutton',
'Restore Color Map', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.restore_scale.set_state(self.t_.get('profile_use_scale', False))
self.w.restore_scale.add_callback('activated', self.set_profile_cb)
self.w.restore_scale.set_tooltip("Remember scale with image")
self.w.restore_pan.set_state(self.t_.get('profile_use_pan', False))
self.w.restore_pan.add_callback('activated', self.set_profile_cb)
self.w.restore_pan.set_tooltip("Remember pan position with image")
self.w.restore_transform.set_state(
self.t_.get('profile_use_transform', False))
self.w.restore_transform.add_callback('activated', self.set_profile_cb)
self.w.restore_transform.set_tooltip("Remember transform with image")
self.w.restore_rotation.set_state(
self.t_.get('profile_use_rotation', False))
self.w.restore_rotation.add_callback('activated', self.set_profile_cb)
self.w.restore_rotation.set_tooltip("Remember rotation with image")
self.w.restore_cuts.set_state(self.t_.get('profile_use_cuts', False))
self.w.restore_cuts.add_callback('activated', self.set_profile_cb)
self.w.restore_cuts.set_tooltip("Remember cut levels with image")
self.w.restore_color_map.set_state(
self.t_.get('profile_use_color_map', False))
self.w.restore_color_map.add_callback('activated', self.set_profile_cb)
self.w.restore_color_map.set_tooltip("Remember color map with image")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
exp = Widgets.Expander("ICC Profiles")
captions = (('Output ICC profile:', 'label', 'Output ICC profile',
'combobox'),
('Rendering intent:', 'label', 'Rendering intent',
'combobox'),
('Proof ICC profile:', 'label', 'Proof ICC profile',
'combobox'),
('Proof intent:', 'label', 'Proof intent', 'combobox'),
('__x', 'spacer', 'Black point compensation', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
value = self.t_.get('icc_output_profile', None)
combobox = b.output_icc_profile
index = 0
for name in self.icc_profiles:
combobox.append_text(str(name))
index += 1
try:
index = self.icc_profiles.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("ICC profile for the viewer display")
value = self.t_.get('icc_output_intent', 'perceptual')
combobox = b.rendering_intent
index = 0
for name in self.icc_intents:
combobox.append_text(name)
index += 1
try:
index = self.icc_intents.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("Rendering intent for the viewer display")
value = self.t_.get('icc_proof_profile', None)
combobox = b.proof_icc_profile
index = 0
for name in self.icc_profiles:
combobox.append_text(str(name))
index += 1
try:
index = self.icc_profiles.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("ICC profile for soft proofing")
value = self.t_.get('icc_proof_intent', None)
combobox = b.proof_intent
index = 0
for name in self.icc_intents:
combobox.append_text(name)
index += 1
try:
index = self.icc_intents.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("Rendering intent for soft proofing")
value = self.t_.get('icc_black_point_compensation', False)
b.black_point_compensation.set_state(value)
b.black_point_compensation.add_callback(
'activated', self.set_icc_profile_cb)
b.black_point_compensation.set_tooltip("Use black point compensation")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btns.set_border_width(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Save Settings")
btn.add_callback('activated', lambda w: self.save_preferences())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def set_cmap_cb(self, w, index):
"""This callback is invoked when the user selects a new color
map from the preferences pane."""
name = cmap.get_names()[index]
self.t_.set(color_map=name)
def set_imap_cb(self, w, index):
"""This callback is invoked when the user selects a new intensity
map from the preferences pane."""
name = imap.get_names()[index]
self.t_.set(intensity_map=name)
def set_calg_cb(self, w, index):
"""This callback is invoked when the user selects a new color
hashing algorithm from the preferences pane."""
#index = w.get_index()
name = self.calg_names[index]
self.t_.set(color_algorithm=name)
def set_tablesize_cb(self, w):
value = int(w.get_text())
self.t_.set(color_hashsize=value)
def set_default_cmaps(self):
cmap_name = "gray"
imap_name = "ramp"
index = self.cmap_names.index(cmap_name)
self.w.cmap_choice.set_index(index)
index = self.imap_names.index(imap_name)
self.w.imap_choice.set_index(index)
self.t_.set(color_map=cmap_name, intensity_map=imap_name)
def set_default_distmaps(self):
name = 'linear'
index = self.calg_names.index(name)
self.w.calg_choice.set_index(index)
hashsize = 65535
## self.w.table_size.set_text(str(hashsize))
self.t_.set(color_algorithm=name, color_hashsize=hashsize)
    def set_zoomrate_cb(self, w, rate):
        """Callback: the zoom rate spinner was changed; save the new rate."""
        self.t_.set(zoom_rate=rate)
    def set_zoomrate_ext_cb(self, setting, value):
        """External callback: zoom_rate setting changed outside the GUI;
        sync the spinner (no-op if the GUI is not built yet)."""
        if not self.gui_up:
            return
        self.w.zoom_rate.set_value(value)
    def set_zoomalg_cb(self, w, idx):
        """Callback: the zoom algorithm combobox changed; save the choice."""
        self.t_.set(zoom_algorithm=self.zoomalg_names[idx])
def set_zoomalg_ext_cb(self, setting, value):
if not self.gui_up:
return
if value == 'step':
self.w.zoom_alg.set_index(0)
self.w.zoom_rate.set_enabled(False)
self.w.stretch_factor.set_enabled(False)
else:
self.w.zoom_alg.set_index(1)
self.w.zoom_rate.set_enabled(True)
self.w.stretch_factor.set_enabled(True)
    def set_interp_cb(self, w, idx):
        """Callback: interpolation combobox changed; save the method."""
        self.t_.set(interpolation=trcalc.interpolation_methods[idx])
    def scalebase_changed_ext_cb(self, setting, value):
        """External callback: scale_x_base/scale_y_base changed; update
        the stretch axis/factor controls to reflect the actual ratio."""
        if not self.gui_up:
            return
        scale_x_base, scale_y_base = self.fitsimage.get_scale_base_xy()

        ratio = float(scale_x_base) / float(scale_y_base)
        if ratio < 1.0:
            # Y is stretched
            idx = 1
            ratio = 1.0 / ratio
        elif ratio > 1.0:
            # X is stretched
            idx = 0
        else:
            # no stretch: keep whatever axis is currently selected
            idx = self.w.stretch_xy.get_index()

        # Update stretch controls to reflect actual scale
        self.w.stretch_xy.set_index(idx)
        self.w.stretch_factor.set_value(ratio)
    def set_zoom_defaults_cb(self, w):
        """Callback: restore default zoom settings ('step' algorithm,
        sqrt(2) rate, no X/Y stretch)."""
        rate = math.sqrt(2.0)
        self.w.stretch_factor.set_value(1.0)
        self.t_.set(zoom_algorithm='step', zoom_rate=rate,
                    scale_x_base=1.0, scale_y_base=1.0)
def set_stretch_cb(self, *args):
axis = self.w.stretch_xy.get_index()
value = self.w.stretch_factor.get_value()
if axis == 0:
self.t_.set(scale_x_base=value, scale_y_base=1.0)
else:
self.t_.set(scale_x_base=1.0, scale_y_base=value)
    def set_autocenter_cb(self, w, idx):
        """Callback: 'Center New' combobox changed; apply to the viewer
        and save the option."""
        option = self.autocenter_options[idx]
        self.fitsimage.set_autocenter(option)
        self.t_.set(autocenter=option)
    def autocenter_changed_ext_cb(self, setting, option):
        """External callback: autocenter setting changed; sync combobox."""
        if not self.gui_up:
            return
        index = self.autocenter_options.index(option)
        self.w.center_new.set_index(index)
    def set_scale_cb(self, w, val):
        """Callback: a Scale X/Y entry was activated; apply both scale
        values (read from the entries, not from `val`) to the viewer."""
        scale_x = float(self.w.scale_x.get_text())
        scale_y = float(self.w.scale_y.get_text())
        self.fitsimage.scale_to(scale_x, scale_y)
    def scale_changed_ext_cb(self, setting, value):
        """External callback: the viewer scale changed; sync the entries.

        `value` is a (scale_x, scale_y) pair.
        """
        if not self.gui_up:
            return
        scale_x, scale_y = value
        self.w.scale_x.set_text(str(scale_x))
        self.w.scale_y.set_text(str(scale_y))
def set_scale_limit_cb(self, *args):
scale_min = self.w.scale_min.get_text().lower()
if scale_min == 'none':
scale_min = None
else:
scale_min = float(scale_min)
scale_max = self.w.scale_max.get_text().lower()
if scale_max == 'none':
scale_max = None
else:
scale_max = float(scale_max)
self.t_.set(scale_min=scale_min, scale_max=scale_max)
    def set_autozoom_cb(self, w, idx):
        """Callback: 'Zoom New' combobox changed; apply to the viewer
        and save the option."""
        option = self.autozoom_options[idx]
        self.fitsimage.enable_autozoom(option)
        self.t_.set(autozoom=option)
    def autozoom_changed_ext_cb(self, setting, option):
        """External callback: autozoom setting changed; sync combobox."""
        if not self.gui_up:
            return
        index = self.autozoom_options.index(option)
        self.w.zoom_new.set_index(index)
    def cut_levels(self, w):
        """Callback: apply manually entered cut levels to the viewer.

        An empty entry keeps the corresponding current cut level.  Any
        parse or apply error is shown in the main GUI rather than raised.
        """
        fitsimage = self.fitsimage
        # start from the current levels so empty fields are no-ops
        loval, hival = fitsimage.get_cut_levels()
        try:
            lostr = self.w.cut_low.get_text().strip()
            if lostr != '':
                loval = float(lostr)

            histr = self.w.cut_high.get_text().strip()
            if histr != '':
                hival = float(histr)
            self.logger.debug("locut=%f hicut=%f" % (loval, hival))

            return fitsimage.cut_levels(loval, hival)
        except Exception as e:
            self.fv.show_error("Error cutting levels: %s" % (str(e)))

        return True
    def auto_levels(self, w):
        """Callback: apply the automatic cut-levels algorithm."""
        self.fitsimage.auto_levels()
    def cutset_cb(self, setting, value):
        """External callback: the 'cuts' setting changed; update the
        read-only low/high value labels.  `value` is (loval, hival)."""
        if not self.gui_up:
            return
        loval, hival = value
        self.w.cut_low_value.set_text('%.4g' % (loval))
        self.w.cut_high_value.set_text('%.4g' % (hival))
    def config_autocut_params(self, method):
        """Rebuild the per-algorithm parameter widgets for the given
        autocuts `method` and install them in the Auto Cuts section."""
        try:
            index = self.autocut_methods.index(method)
            self.w.auto_method.set_index(index)
        except Exception:
            pass

        # remove old params
        self.w.acvbox.remove_all()

        # Create new autocuts object of the right kind
        ac_class = AutoCuts.get_autocuts(method)

        # Build up a set of control widgets for the autocuts
        # algorithm tweakable parameters
        paramlst = ac_class.get_params_metadata()

        # Get the canonical version of this object stored in our cache
        # and make a ParamSet from it
        params = self.autocuts_cache.setdefault(method, Bunch.Bunch())
        self.ac_params = ParamSet.ParamSet(self.logger, params)

        # Build widgets for the parameter/attribute list
        w = self.ac_params.build_params(paramlst,
                                        orientation=self.orientation)
        self.ac_params.add_callback('changed', self.autocut_params_changed_cb)

        # Add this set of widgets to the pane
        self.w.acvbox.add_widget(w, stretch=1)
    def set_autocut_method_ext_cb(self, setting, value):
        """External callback: autocut_method changed; rebuild the
        parameter widgets on the GUI thread."""
        if not self.gui_up:
            return
        autocut_method = self.t_['autocut_method']
        self.fv.gui_do(self.config_autocut_params, autocut_method)
    def set_autocut_params_ext_cb(self, setting, value):
        """External callback: autocut_params changed; push the new
        parameter values into the ParamSet widgets."""
        if not self.gui_up:
            return
        params = self.t_['autocut_params']
        params_d = dict(params)  # noqa
        self.ac_params.update_params(params_d)
        #self.fv.gui_do(self.ac_params.params_to_widgets)
    def set_autocut_method_cb(self, w, idx):
        """Callback: the Auto Method combobox changed; rebuild the
        parameter widgets and save both method and current params."""
        method = self.autocut_methods[idx]

        self.config_autocut_params(method)

        args, kwdargs = self.ac_params.get_params()
        params = list(kwdargs.items())
        self.t_.set(autocut_method=method, autocut_params=params)
    def autocut_params_changed_cb(self, paramObj, ac_obj):
        """This callback is called when the user changes the attributes of
        an object via the paramSet.
        """
        args, kwdargs = paramObj.get_params()
        # persist as a list of (name, value) pairs
        params = list(kwdargs.items())
        self.t_.set(autocut_params=params)
    def set_autocuts_cb(self, w, index):
        """Callback: 'Cut New' combobox changed; apply to the viewer
        and save the option."""
        option = self.autocut_options[index]
        self.fitsimage.enable_autocuts(option)
        self.t_.set(autocuts=option)
    def autocuts_changed_ext_cb(self, setting, option):
        """External callback: autocuts setting changed; sync combobox."""
        self.logger.debug("autocuts changed to %s" % option)
        index = self.autocut_options.index(option)
        if self.gui_up:
            self.w.cut_new.set_index(index)
def set_transforms_cb(self, *args):
flip_x = self.w.flip_x.get_state()
flip_y = self.w.flip_y.get_state()
swap_xy = self.w.swap_xy.get_state()
self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
return True
def set_transform_ext_cb(self, setting, value):
if not self.gui_up:
return
flip_x, flip_y, swap_xy = (
self.t_['flip_x'], self.t_['flip_y'], self.t_['swap_xy'])
self.w.flip_x.set_state(flip_x)
self.w.flip_y.set_state(flip_y)
self.w.swap_xy.set_state(swap_xy)
def rgbmap_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
calg_name = self.t_['color_algorithm']
try:
idx = self.calg_names.index(calg_name)
except IndexError:
idx = 0
self.w.algorithm.set_index(idx)
cmap_name = self.t_['color_map']
try:
idx = self.cmap_names.index(cmap_name)
except IndexError:
idx = 0
self.w.colormap.set_index(idx)
imap_name = self.t_['intensity_map']
try:
idx = self.imap_names.index(imap_name)
except IndexError:
idx = 0
self.w.intensity.set_index(idx)
    def set_buflen_ext_cb(self, setting, value):
        """External callback: numImages changed; resize the channel's
        image buffer and sync the entry widget."""
        num_images = self.t_['numImages']

        # update the datasrc length
        chinfo = self.channel
        chinfo.datasrc.set_bufsize(num_images)
        self.logger.debug("num images was set to {0}".format(num_images))

        if not self.gui_up:
            return
        self.w.num_images.set_text(str(num_images))
    def set_sort_cb(self, w, index):
        """This callback is invoked when the user selects a new sort order
        from the preferences pane."""
        name = self.sort_options[index]
        self.t_.set(sort_order=name)
    def set_preload_cb(self, w, tf):
        """This callback is invoked when the user checks the preload images
        box in the preferences pane."""
        self.t_.set(preload_images=tf)
def set_scrollbars_cb(self, w, tf):
"""This callback is invoked when the user checks the 'Use Scrollbars'
box in the preferences pane."""
scrollbars = 'on' if tf else 'off'
self.t_.set(scrollbars=scrollbars)
    def set_icc_profile_cb(self, setting, idx):
        """Callback: any ICC control changed; read the full set of ICC
        controls and store them in one settings update."""
        idx = self.w.output_icc_profile.get_index()
        output_profile_name = self.icc_profiles[idx]
        idx = self.w.rendering_intent.get_index()
        intent_name = self.icc_intents[idx]

        idx = self.w.proof_icc_profile.get_index()
        proof_profile_name = self.icc_profiles[idx]
        idx = self.w.proof_intent.get_index()
        proof_intent = self.icc_intents[idx]

        bpc = self.w.black_point_compensation.get_state()

        self.t_.set(icc_output_profile=output_profile_name,
                    icc_output_intent=intent_name,
                    icc_proof_profile=proof_profile_name,
                    icc_proof_intent=proof_intent,
                    icc_black_point_compensation=bpc)
        return True
    def rotate_cb(self, w, deg):
        """Callback: the Rotate spinner changed; save the new angle."""
        #deg = self.w.rotate.get_value()
        self.t_.set(rot_deg=deg)
        return True
    def set_rotate_ext_cb(self, setting, value):
        """External callback: rot_deg setting changed; sync the spinner."""
        if not self.gui_up:
            return
        self.w.rotate.set_value(value)
        return True
    def center_image_cb(self, *args):
        """Callback: set the pan position to the center of the image."""
        self.fitsimage.center_image()
        return True
    def pan_changed_ext_cb(self, setting, value):
        """External callback: the pan setting changed; refresh the pan
        coordinate entries."""
        if not self.gui_up:
            return
        self._update_pan_coords()
    def set_pan_cb(self, *args):
        """Widget callback: parse the pan X/Y text fields and set the
        viewer's pan position.

        Values containing ':' are treated as sexagesimal (H:M:S / D:M:S)
        strings and force WCS mode; otherwise they are parsed as floats
        in the currently selected coordinate system.
        """
        idx = self.w.pan_coord.get_index()
        pan_coord = self.pancoord_options[idx]
        pan_xs = self.w.pan_x.get_text().strip()
        pan_ys = self.w.pan_y.get_text().strip()
        # TODO: use current value for other coord if only one coord supplied
        if (':' in pan_xs) or (':' in pan_ys):
            # TODO: get maximal precision
            pan_x = wcs.hmsStrToDeg(pan_xs)
            pan_y = wcs.dmsStrToDeg(pan_ys)
            pan_coord = 'wcs'
        elif pan_coord == 'wcs':
            pan_x = float(pan_xs)
            pan_y = float(pan_ys)
        else:
            # data/pixel coordinates: honor the configured display offset
            coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
            pan_x = float(pan_xs) - coord_offset
            pan_y = float(pan_ys) - coord_offset
        self.fitsimage.set_pan(pan_x, pan_y, coord=pan_coord)
        return True
    def _update_pan_coords(self):
        """Refresh the pan X/Y text fields (and coordinate selector) from
        the viewer's current pan position, formatted according to the
        active coordinate system."""
        pan_coord = self.t_.get('pan_coord', 'data')
        pan_x, pan_y = self.fitsimage.get_pan(coord=pan_coord)
        #self.logger.debug("updating pan coords (%s) %f %f" % (pan_coord, pan_x, pan_y))
        if pan_coord == 'wcs':
            use_sex = self.w.wcs_sexagesimal.get_state()
            if use_sex:
                # show RA/DEC as sexagesimal strings
                pan_x = wcs.raDegToString(pan_x, format='%02d:%02d:%010.7f')
                pan_y = wcs.decDegToString(pan_y, format='%s%02d:%02d:%09.7f')
        else:
            # data/pixel coordinates: honor the configured display offset
            coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
            pan_x += coord_offset
            pan_y += coord_offset
        self.w.pan_x.set_text(str(pan_x))
        self.w.pan_y.set_text(str(pan_y))
        index = self.pancoord_options.index(pan_coord)
        self.w.pan_coord.set_index(index)
    def set_pan_coord_cb(self, w, idx):
        """Widget callback: the pan coordinate system changed; store the
        current pan (re-expressed in the new system) and the system name."""
        pan_coord = self.pancoord_options[idx]
        pan_x, pan_y = self.fitsimage.get_pan(coord=pan_coord)
        self.t_.set(pan=(pan_x, pan_y), pan_coord=pan_coord)
        #self._update_pan_coords()
        return True
    def restore_cb(self, *args):
        """Button callback: reset flips, axis swap and rotation to their
        defaults and re-center the image."""
        self.t_.set(flip_x=False, flip_y=False, swap_xy=False,
                    rot_deg=0.0)
        self.fitsimage.center_image()
        return True
def set_misc_cb(self, *args):
markc = (self.w.mark_center.get_state() != 0)
self.t_.set(show_pan_position=markc)
self.fitsimage.show_pan_mark(markc)
return True
def set_chprefs_cb(self, *args):
switchnew = (self.w.follow_new.get_state() != 0)
raisenew = (self.w.raise_new.get_state() != 0)
genthumb = (self.w.create_thumbnail.get_state() != 0)
self.t_.set(switchnew=switchnew, raisenew=raisenew,
genthumb=genthumb)
    def set_chprefs_ext_cb(self, *args):
        """Settings callback: channel-behavior flags changed externally;
        sync the checkboxes if the GUI has been built."""
        if self.gui_up:
            self.w.follow_new.set_state(self.t_['switchnew'])
            self.w.raise_new.set_state(self.t_['raisenew'])
            self.w.create_thumbnail.set_state(self.t_['genthumb'])
def set_profile_cb(self, *args):
restore_scale = (self.w.restore_scale.get_state() != 0)
restore_pan = (self.w.restore_pan.get_state() != 0)
restore_cuts = (self.w.restore_cuts.get_state() != 0)
restore_transform = (self.w.restore_transform.get_state() != 0)
restore_rotation = (self.w.restore_rotation.get_state() != 0)
restore_color_map = (self.w.restore_color_map.get_state() != 0)
self.t_.set(profile_use_scale=restore_scale, profile_use_pan=restore_pan,
profile_use_cuts=restore_cuts,
profile_use_transform=restore_transform,
profile_use_rotation=restore_rotation,
profile_use_color_map=restore_color_map)
def set_buffer_cb(self, *args):
num_images = int(self.w.num_images.get_text())
self.logger.debug("setting num images {0}".format(num_images))
self.t_.set(numImages=num_images)
    def set_wcs_params_cb(self, *args):
        """Widget callback: persist the selected WCS coordinate system
        and display format."""
        idx = self.w.wcs_coords.get_index()
        try:
            ctype = wcsmod.coord_types[idx]
        except IndexError:
            # out-of-range index: fall back to a sane default
            ctype = 'icrs'
        idx = self.w.wcs_display.get_index()
        dtype = wcsmod.display_types[idx]
        self.t_.set(wcs_coords=ctype, wcs_display=dtype)
    def preferences_to_controls(self):
        """Populate all preferences-pane widgets from the current channel
        settings (the inverse of the various ``set_*_cb`` callbacks).
        Called when the plugin GUI is (re)built.
        """
        prefs = self.t_
        # color map
        rgbmap = self.fitsimage.get_rgbmap()
        cm = rgbmap.get_cmap()
        try:
            index = self.cmap_names.index(cm.name)
        except ValueError:
            # may be a custom color map installed
            index = 0
        self.w.cmap_choice.set_index(index)
        # color dist algorithm
        calg = rgbmap.get_hash_algorithm()
        index = self.calg_names.index(calg)
        self.w.calg_choice.set_index(index)
        ## size = rgbmap.get_hash_size()
        ## self.w.table_size.set_text(str(size))
        # intensity map
        im = rgbmap.get_imap()
        try:
            index = self.imap_names.index(im.name)
        except ValueError:
            # may be a custom intensity map installed
            index = 0
        self.w.imap_choice.set_index(index)
        # TODO: this is a HACK to get around Qt's callbacks
        # on setting widget values--need a way to disable callbacks
        # for direct setting
        auto_zoom = prefs.get('autozoom', 'off')
        # zoom settings
        zoomalg = prefs.get('zoom_algorithm', "step")
        index = self.zoomalg_names.index(zoomalg)
        self.w.zoom_alg.set_index(index)
        zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
        self.w.zoom_rate.set_value(zoomrate)
        # rate/stretch widgets only apply to the non-'step' algorithm
        self.w.zoom_rate.set_enabled(zoomalg != 'step')
        self.w.stretch_factor.set_enabled(zoomalg != 'step')
        self.scalebase_changed_ext_cb(prefs, None)
        scale_x, scale_y = self.fitsimage.get_scale_xy()
        self.w.scale_x.set_text(str(scale_x))
        self.w.scale_y.set_text(str(scale_y))
        scale_min = prefs.get('scale_min', None)
        self.w.scale_min.set_text(str(scale_min))
        scale_max = prefs.get('scale_max', None)
        self.w.scale_max.set_text(str(scale_max))
        # panning settings
        self._update_pan_coords()
        self.w.mark_center.set_state(prefs.get('show_pan_position', False))
        # transform settings
        self.w.flip_x.set_state(prefs.get('flip_x', False))
        self.w.flip_y.set_state(prefs.get('flip_y', False))
        self.w.swap_xy.set_state(prefs.get('swap_xy', False))
        self.w.rotate.set_value(prefs.get('rot_deg', 0.00))
        # auto cuts settings
        autocuts = prefs.get('autocuts', 'off')
        index = self.autocut_options.index(autocuts)
        self.w.cut_new.set_index(index)
        autocut_method = prefs.get('autocut_method', None)
        if autocut_method is None:
            autocut_method = 'histogram'
        else:
            ## params = prefs.get('autocut_params', {})
            ## p = self.autocuts_cache.setdefault(autocut_method, {})
            ## p.update(params)
            pass
        self.config_autocut_params(autocut_method)
        # auto zoom settings
        auto_zoom = prefs.get('autozoom', 'off')
        index = self.autozoom_options.index(auto_zoom)
        self.w.zoom_new.set_index(index)
        # wcs settings
        method = prefs.get('wcs_coords', "icrs")
        try:
            index = wcsmod.coord_types.index(method)
            self.w.wcs_coords.set_index(index)
        except ValueError:
            pass
        method = prefs.get('wcs_display', "sexagesimal")
        try:
            index = wcsmod.display_types.index(method)
            self.w.wcs_display.set_index(index)
        except ValueError:
            pass
        # misc settings
        prefs.setdefault('switchnew', True)
        self.w.follow_new.set_state(prefs['switchnew'])
        prefs.setdefault('raisenew', True)
        self.w.raise_new.set_state(prefs['raisenew'])
        prefs.setdefault('genthumb', True)
        self.w.create_thumbnail.set_state(prefs['genthumb'])
        num_images = prefs.get('numImages', 0)
        self.w.num_images.set_text(str(num_images))
        prefs.setdefault('preload_images', False)
        self.w.preload_images.set_state(prefs['preload_images'])
        # profile settings
        prefs.setdefault('profile_use_scale', False)
        self.w.restore_scale.set_state(prefs['profile_use_scale'])
        prefs.setdefault('profile_use_pan', False)
        self.w.restore_pan.set_state(prefs['profile_use_pan'])
        prefs.setdefault('profile_use_cuts', False)
        self.w.restore_cuts.set_state(prefs['profile_use_cuts'])
        prefs.setdefault('profile_use_transform', False)
        self.w.restore_transform.set_state(prefs['profile_use_transform'])
        prefs.setdefault('profile_use_rotation', False)
        self.w.restore_rotation.set_state(prefs['profile_use_rotation'])
        prefs.setdefault('profile_use_color_map', False)
        self.w.restore_color_map.set_state(prefs['profile_use_color_map'])
    def save_preferences(self):
        """Persist the current channel settings to disk."""
        self.t_.save()
    def close(self):
        """Close this plugin instance via the framework."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True
    def start(self):
        """Plugin start hook: populate the GUI from current settings."""
        self.preferences_to_controls()
    def pause(self):
        """Plugin pause hook (no-op)."""
        pass
    def resume(self):
        """Plugin resume hook (no-op)."""
        pass
    def stop(self):
        """Plugin stop hook: mark the GUI as torn down."""
        self.gui_up = False
    def redo(self):
        """Redraw hook (no-op for this plugin)."""
        pass
    def __str__(self):
        """Plugin name; passed to the framework (see close())."""
        return 'preferences'
# END
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/event_handling/viewlims.py | 6 | 2880 | # Creates two identical panels. Zooming in on the right panel will show
# a rectangle in the first panel, denoting the zoomed region.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# We just subclass Rectangle so that it can be called with an Axes
# instance, causing the rectangle to update its shape to match the
# bounds of the Axes
class UpdatingRect(Rectangle):
    """Rectangle that, when called with an Axes, resizes itself to that
    Axes' current view limits and schedules a redraw."""
    def __call__(self, ax):
        # match this rectangle to the axes' data view bounds
        self.set_bounds(*ax.viewLim.bounds)
        ax.figure.canvas.draw_idle()
# A class that will regenerate a fractal set as we zoom in, so that you
# can actually see the increasing detail. A box in the left panel will show
# the area to which we are zoomed.
class MandlebrotDisplay(object):
    """Regenerate a Mandelbrot escape-time image for the current view, so
    that zooming in actually reveals more detail.

    Parameters:
        h, w: pixel height/width of the rendered image
        niter: maximum number of escape-time iterations
        radius: escape radius; points are counted while ``|z| < radius``
        power: exponent in the iteration ``z <- z**power + c``
    """
    def __init__(self, h=500, w=500, niter=50, radius=2., power=2):
        self.height = h
        self.width = w
        self.niter = niter
        self.radius = radius
        self.power = power
    def __call__(self, xstart, xend, ystart, yend):
        """Return a (height, width) array of escape-iteration counts for
        the rectangle [xstart, xend] x [ystart, yend]."""
        self.x = np.linspace(xstart, xend, self.width)
        self.y = np.linspace(ystart, yend, self.height).reshape(-1,1)
        c = self.x + 1.0j * self.y
        threshold_time = np.zeros((self.height, self.width))
        # use builtin complex/bool: the np.complex/np.bool aliases were
        # deprecated in NumPy 1.20 and removed in 1.24
        z = np.zeros(threshold_time.shape, dtype=complex)
        mask = np.ones(threshold_time.shape, dtype=bool)
        for i in range(self.niter):
            z[mask] = z[mask]**self.power + c[mask]
            mask = (np.abs(z) < self.radius)
            threshold_time += mask
        return threshold_time
    def ax_update(self, ax):
        """Re-render the fractal to match *ax*'s current view limits."""
        ax.set_autoscale_on(False) # Otherwise, infinite loop
        #Get the number of points from the number of pixels in the window
        dims = ax.axesPatch.get_window_extent().bounds
        self.width = int(dims[2] + 0.5)
        # BUGFIX: the height must come from the window's height (dims[3]),
        # not its width (dims[2])
        self.height = int(dims[3] + 0.5)
        #Get the range for the new area
        xstart,ystart,xdelta,ydelta = ax.viewLim.bounds
        xend = xstart + xdelta
        yend = ystart + ydelta
        # Update the image object with our new data and extent
        im = ax.images[-1]
        im.set_data(self.__call__(xstart, xend, ystart, yend))
        im.set_extent((xstart, xend, ystart, yend))
        ax.figure.canvas.draw_idle()
# Render an initial view and show it in two panels: zooming the right
# panel re-renders the fractal and draws the zoom box on the left panel.
md = MandlebrotDisplay()
Z = md(-2., 0.5, -1.25, 1.25)
fig1, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
# rectangle overlaid on the left panel tracking the right panel's view
rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black')
rect.set_bounds(*ax2.viewLim.bounds)
ax1.add_patch(rect)
# Connect for changing the view limits
ax2.callbacks.connect('xlim_changed', rect)
ax2.callbacks.connect('ylim_changed', rect)
ax2.callbacks.connect('xlim_changed', md.ax_update)
ax2.callbacks.connect('ylim_changed', md.ax_update)
plt.show()
| mit |
fabiansinz/attorch | attorch/constraints.py | 3 | 1058 | from torch import nn
def constrain_all(self):
    """Apply this module's own ``constrain`` hook (if it defines one),
    then recurse into every child module."""
    try:
        constrain = self.constrain
    except AttributeError:
        pass
    else:
        constrain()
    for child in self.children():
        child.constrain_all()
# extend torch nn.Module to have constrain_all function
# (monkeypatch: every nn.Module instance in the program gains .constrain_all)
nn.Module.constrain_all = constrain_all
# all constrain function takes an optional cache argument
# the cache can be used to store a relatively expensive reusable
# item usable in the constraining. For example, the cache can store the
# binary map of all units that should be constrained.
def positive(weight, cache=None):
    """Constrain ``weight`` to be non-negative in place: every negative
    entry is zeroed.  ``cache`` is passed through unchanged."""
    keep = weight.data.ge(0).float()
    weight.data.mul_(keep)
    return cache
def negative(weight, cache=None):
    """Constrain ``weight`` to be non-positive in place: every positive
    entry is zeroed.  ``cache`` is passed through unchanged."""
    keep = weight.data.le(0).float()
    weight.data.mul_(keep)
    return cache
def positive_except_self(weight, cache=None):
    """Zero the negative entries of a 4-D conv weight in place, except the
    center tap of each channel's own (diagonal) filter, which is always
    kept.  Requires an odd kernel; raises ValueError otherwise."""
    keep = weight.data.ge(0).float()
    kh, kw = keep.size()[2], keep.size()[3]
    if kh % 2 == 0 or kw % 2 == 0:
        raise ValueError('kernel size must be odd')
    ci, cj = kh // 2, kw // 2
    for chan in range(keep.size()[0]):
        # always pass through the unit's own center weight
        keep[chan, chan, ci, cj] = 1
    weight.data *= keep
    return cache
| mit |
junhuac/MQUIC | depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/oauth2client/oauth2client/django_orm.py | 70 | 4125 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
    """Django model field that (de)serializes an oauth2client
    ``Credentials`` object as base64-encoded pickle text."""
    __metaclass__ = models.SubfieldBase  # Python-2 metaclass: makes Django call to_python on assignment
    def __init__(self, *args, **kwargs):
        # credentials are optional (nullable) unless the caller says otherwise
        if 'null' not in kwargs:
            kwargs['null'] = True
        super(CredentialsField, self).__init__(*args, **kwargs)
    def get_internal_type(self):
        # stored in the database as TEXT
        return "TextField"
    def to_python(self, value):
        # DB value (or already-deserialized object) -> Credentials instance
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Credentials):
            return value
        return pickle.loads(base64.b64decode(value))
    def get_db_prep_value(self, value, connection, prepared=False):
        # Credentials instance -> base64-encoded pickle for storage
        if value is None:
            return None
        return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
    """Django model field that (de)serializes an oauth2client ``Flow``
    object as base64-encoded pickle text (mirrors CredentialsField)."""
    __metaclass__ = models.SubfieldBase  # Python-2 metaclass: makes Django call to_python on assignment
    def __init__(self, *args, **kwargs):
        # the flow is optional (nullable) unless the caller says otherwise
        if 'null' not in kwargs:
            kwargs['null'] = True
        super(FlowField, self).__init__(*args, **kwargs)
    def get_internal_type(self):
        # stored in the database as TEXT
        return "TextField"
    def to_python(self, value):
        # DB value (or already-deserialized object) -> Flow instance
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Flow):
            return value
        return pickle.loads(base64.b64decode(value))
    def get_db_prep_value(self, value, connection, prepared=False):
        # Flow instance -> base64-encoded pickle for storage
        if value is None:
            return None
        return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from
    the datastore.
    This Storage helper presumes the Credentials
    have been stored as a CredentialsField
    on a db model class.
    """
    def __init__(self, model_class, key_name, key_value, property_name):
        """Constructor for Storage.
        Args:
          model_class: db.Model, model class
          key_name: string, key name for the entity that has the credentials
          key_value: string, key value for the entity that has the credentials
          property_name: string, name of the property that is a CredentialsField
        """
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name
    def locked_get(self):
        """Retrieve Credential from datastore.
        Returns:
          oauth2client.Credentials, or None if no matching entity exists
        """
        credential = None
        query = {self.key_name: self.key_value}
        entities = self.model_class.objects.filter(**query)
        if len(entities) > 0:
            credential = getattr(entities[0], self.property_name)
            if credential and hasattr(credential, 'set_store'):
                # let the credential write refreshed tokens back here
                credential.set_store(self)
        return credential
    def locked_put(self, credentials, overwrite=False):
        """Write a Credentials to the datastore.
        Args:
          credentials: Credentials, the credentials to store.
          overwrite: Boolean, indicates whether you would like these credentials to
          overwrite any existing stored credentials.
        """
        args = {self.key_name: self.key_value}
        if overwrite:
            entity, unused_is_new = self.model_class.objects.get_or_create(**args)
        else:
            entity = self.model_class(**args)
        setattr(entity, self.property_name, credentials)
        entity.save()
    def locked_delete(self):
        """Delete Credentials from the datastore."""
        query = {self.key_name: self.key_value}
        # NOTE(review): the queryset .delete() return value is unused
        entities = self.model_class.objects.filter(**query).delete()
| mit |
huntxu/neutron | neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py | 2 | 26352 | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from operator import attrgetter
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib import constants
from neutron_lib import context
from oslo_utils import uuidutils
import testscenarios
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import common_db_mixin
from neutron.objects import network
from neutron.scheduler import dhcp_agent_scheduler
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron.tests.unit.scheduler import (test_dhcp_agent_scheduler as
test_dhcp_sch)
# Required to generate tests from scenarios. Not compatible with nose.
# (hooks testscenarios into the stdlib unittest load_tests protocol)
load_tests = testscenarios.load_tests_apply_scenarios
class BaseTestScheduleNetwork(object):
    """Base class which defines scenarios for schedulers.
    agent_count
        Number of dhcp agents (also number of hosts).
    max_agents_per_network
        Maximum DHCP Agents that can be scheduled for a network.
    scheduled_agent_count
        Number of agents the network has previously scheduled
    down_agent_count
        Number of dhcp agents which are down
    expected_scheduled_agent_count
        Number of scheduled agents the schedule() should return
        or 'None' if the schedule() cannot schedule the network.
    """
    # Each entry is (description, parameter-dict); testscenarios runs every
    # test method of the subclasses once per entry with these attributes set.
    scenarios = [
        ('No agents scheduled if no agents are present',
         dict(agent_count=0,
              max_agents_per_network=1,
              scheduled_agent_count=0,
              down_agent_count=0,
              expected_scheduled_agent_count=None)),
        ('No agents scheduled if network already hosted and'
         ' max_agents_per_network reached',
         dict(agent_count=1,
              max_agents_per_network=1,
              scheduled_agent_count=1,
              down_agent_count=0,
              expected_scheduled_agent_count=None)),
        ('No agents scheduled if all agents are down',
         dict(agent_count=2,
              max_agents_per_network=1,
              scheduled_agent_count=0,
              down_agent_count=2,
              expected_scheduled_agent_count=None)),
        ('Agent scheduled to the network if network is not yet hosted',
         dict(agent_count=1,
              max_agents_per_network=1,
              scheduled_agent_count=0,
              down_agent_count=0,
              expected_scheduled_agent_count=1)),
        ('Additional Agents scheduled to the network if max_agents_per_network'
         ' is not yet reached',
         dict(agent_count=3,
              max_agents_per_network=3,
              scheduled_agent_count=1,
              down_agent_count=0,
              expected_scheduled_agent_count=2)),
        ('No agent scheduled if agent is dead',
         dict(agent_count=3,
              max_agents_per_network=3,
              scheduled_agent_count=1,
              down_agent_count=1,
              expected_scheduled_agent_count=1)),
    ]
class TestChanceScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
                                agentschedulers_db.DhcpAgentSchedulerDbMixin,
                                agents_db.AgentDbMixin,
                                common_db_mixin.CommonDbMixin,
                                BaseTestScheduleNetwork):
    """Test various scenarios for ChanceScheduler.schedule."""
    def test_schedule_network(self):
        self.config(dhcp_agents_per_network=self.max_agents_per_network)
        scheduler = dhcp_agent_scheduler.ChanceScheduler()
        # create dhcp agents
        hosts = ['host-%s' % i for i in range(self.agent_count)]
        dhcp_agents = self._create_and_set_agents_down(
            hosts, down_agent_count=self.down_agent_count)
        active_agents = dhcp_agents[self.down_agent_count:]
        # schedule some agents before calling schedule
        if self.scheduled_agent_count:
            # schedule the network
            schedule_agents = active_agents[:self.scheduled_agent_count]
            scheduler.resource_filter.bind(self.ctx,
                                           schedule_agents, self.network_id)
        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
                                                     self.network)
        if self.expected_scheduled_agent_count:
            # scheduler returned the expected number of new agents, and the
            # network ends up hosted by pre-scheduled + newly scheduled ones
            self.assertEqual(self.expected_scheduled_agent_count,
                             len(actual_scheduled_agents))
            hosted_agents = self.list_dhcp_agents_hosting_network(
                self.ctx, self.network_id)
            self.assertEqual(self.scheduled_agent_count +
                             len(actual_scheduled_agents),
                             len(hosted_agents['agents']))
        else:
            self.assertEqual([], actual_scheduled_agents)
class TestWeightScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
                                agentschedulers_db.DhcpAgentSchedulerDbMixin,
                                agents_db.AgentDbMixin,
                                common_db_mixin.CommonDbMixin,
                                BaseTestScheduleNetwork):
    """Test various scenarios for WeightScheduler.schedule."""
    def test_weight_schedule_network(self):
        self.config(dhcp_agents_per_network=self.max_agents_per_network)
        scheduler = dhcp_agent_scheduler.WeightScheduler()
        # create dhcp agents
        hosts = ['host-%s' % i for i in range(self.agent_count)]
        dhcp_agents = self._create_and_set_agents_down(
            hosts, down_agent_count=self.down_agent_count)
        active_agents = dhcp_agents[self.down_agent_count:]
        unscheduled_active_agents = list(active_agents)
        # schedule some agents before calling schedule
        if self.scheduled_agent_count:
            # schedule the network
            schedule_agents = active_agents[:self.scheduled_agent_count]
            scheduler.resource_filter.bind(self.ctx,
                                           schedule_agents, self.network_id)
            for agent in schedule_agents:
                unscheduled_active_agents.remove(agent)
        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
                                                     self.network)
        if self.expected_scheduled_agent_count:
            # the weight scheduler must pick the least-loaded of the still
            # unscheduled active agents
            sorted_unscheduled_active_agents = sorted(
                unscheduled_active_agents,
                key=attrgetter('load'))[0:self.expected_scheduled_agent_count]
            self.assertItemsEqual(
                (agent['id'] for agent in actual_scheduled_agents),
                (agent['id'] for agent in sorted_unscheduled_active_agents))
            self.assertEqual(self.expected_scheduled_agent_count,
                             len(actual_scheduled_agents))
            hosted_agents = self.list_dhcp_agents_hosting_network(
                self.ctx, self.network_id)
            self.assertEqual(self.scheduled_agent_count +
                             len(actual_scheduled_agents),
                             len(hosted_agents['agents']))
        else:
            self.assertEqual([], actual_scheduled_agents)
class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
                       agentschedulers_db.DhcpAgentSchedulerDbMixin,
                       agents_db.AgentDbMixin,
                       common_db_mixin.CommonDbMixin):
    """Test various scenarios for ChanceScheduler.auto_schedule_networks.
    Below is the brief description of the scenario variables
    --------------------------------------------------------
    agent_count
        number of DHCP agents (also number of hosts).
    max_agents_per_network
        Maximum DHCP Agents that can be scheduled for a network.
    network_count
        Number of networks.
    networks_with_dhcp_disabled
        List of networks with dhcp disabled
    hosted_networks
        A mapping of agent id to the ids of the networks that they
        should be initially hosting.
    expected_auto_schedule_return_value
        Expected return value of 'auto_schedule_networks'.
    expected_hosted_networks
        This stores the expected networks that should have been scheduled
        (or that could have already been scheduled) for each agent
        after the 'auto_schedule_networks' function is called.
    no_network_with_az_match
        If this parameter is True, there is no unscheduled network with
        availability_zone_hints matches to an availability_zone of agents
        to be scheduled. The default is False.
    """
    scenarios = [
        ('Agent scheduled to the network if network is not yet hosted',
         dict(agent_count=1,
              max_agents_per_network=1,
              network_count=1,
              networks_with_dhcp_disabled=[],
              hosted_networks={},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': ['network-0']})),
        ('No agent scheduled if no networks are present',
         dict(agent_count=1,
              max_agents_per_network=1,
              network_count=0,
              networks_with_dhcp_disabled=[],
              hosted_networks={},
              expected_auto_schedule_return_value=False,
              expected_hosted_networks={'agent-0': []})),
        ('Agents scheduled to the networks if networks are not yet hosted',
         dict(agent_count=2,
              max_agents_per_network=3,
              network_count=2,
              networks_with_dhcp_disabled=[],
              hosted_networks={},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': ['network-0',
                                                    'network-1'],
                                        'agent-1': ['network-0',
                                                    'network-1']})),
        ('No new agents scheduled if networks are already hosted',
         dict(agent_count=2,
              max_agents_per_network=3,
              network_count=2,
              networks_with_dhcp_disabled=[],
              hosted_networks={'agent-0': ['network-0', 'network-1'],
                               'agent-1': ['network-0', 'network-1']},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': ['network-0',
                                                    'network-1'],
                                        'agent-1': ['network-0',
                                                    'network-1']})),
        ('Additional agents scheduled to the networks if'
         ' max_agents_per_network is not yet reached',
         dict(agent_count=4,
              max_agents_per_network=3,
              network_count=4,
              networks_with_dhcp_disabled=[],
              hosted_networks={'agent-0': ['network-0', 'network-1'],
                               'agent-1': ['network-0'],
                               'agent-2': ['network-2'],
                               'agent-3': ['network-0', 'network-2']},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': ['network-0',
                                                    'network-1',
                                                    'network-2',
                                                    'network-3'],
                                        'agent-1': ['network-0',
                                                    'network-1',
                                                    'network-2',
                                                    'network-3'],
                                        'agent-2': ['network-1',
                                                    'network-2',
                                                    'network-3'],
                                        'agent-3': ['network-0',
                                                    'network-1',
                                                    'network-2',
                                                    'network-3']})),
        ('No agents scheduled if networks already hosted and'
         ' max_agents_per_network reached',
         dict(agent_count=4,
              max_agents_per_network=1,
              network_count=4,
              networks_with_dhcp_disabled=[],
              hosted_networks={'agent-0': ['network-0'],
                               'agent-1': ['network-2'],
                               'agent-2': ['network-1'],
                               'agent-3': ['network-3']},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': ['network-0'],
                                        'agent-1': ['network-2'],
                                        'agent-2': ['network-1'],
                                        'agent-3': ['network-3']})),
        ('No agents scheduled to the network with dhcp disabled',
         dict(agent_count=2,
              max_agents_per_network=3,
              network_count=2,
              networks_with_dhcp_disabled=['network-1'],
              hosted_networks={},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': ['network-0'],
                                        'agent-1': ['network-0']})),
        ('No agents scheduled if all networks have dhcp disabled',
         dict(agent_count=2,
              max_agents_per_network=3,
              network_count=2,
              networks_with_dhcp_disabled=['network-0', 'network-1'],
              hosted_networks={},
              expected_auto_schedule_return_value=False,
              expected_hosted_networks={'agent-0': [],
                                        'agent-1': []})),
        ('No agents scheduled if unscheduled network does not match AZ',
         dict(agent_count=1,
              max_agents_per_network=1,
              network_count=1,
              networks_with_dhcp_disabled=[],
              hosted_networks={},
              expected_auto_schedule_return_value=True,
              expected_hosted_networks={'agent-0': []},
              no_network_with_az_match=True)),
    ]
    def _strip_host_index(self, name):
        """Strips the host index.
        Eg. if name = '2-agent-3', then 'agent-3' is returned.
        """
        return name[name.find('-') + 1:]
    def _extract_index(self, name):
        """Extracts the index number and returns.
        Eg. if name = '2-agent-3', then 3 is returned
        """
        return int(name.split('-')[-1])
    def get_subnets(self, context, fields=None):
        """Plugin stub: report one subnet per created network, with DHCP
        enabled unless the network is listed in networks_with_dhcp_disabled."""
        subnets = []
        for net in self._networks:
            enable_dhcp = (self._strip_host_index(net['name']) not in
                           self.networks_with_dhcp_disabled)
            subnets.append({'network_id': net.id,
                            'enable_dhcp': enable_dhcp,
                            'segment_id': None})
        return subnets
    def get_network(self, context, net_id):
        """Plugin stub: return AZ hints that never match when the
        no_network_with_az_match scenario flag is set."""
        az_hints = []
        if getattr(self, 'no_network_with_az_match', False):
            az_hints = ['not-match']
        return {'availability_zone_hints': az_hints}
    def _get_hosted_networks_on_dhcp_agent(self, agent_id):
        """Return the ids of the networks bound to the given agent."""
        binding_objs = network.NetworkDhcpAgentBinding.get_objects(
            self.ctx, dhcp_agent_id=agent_id)
        return [item.network_id for item in binding_objs]
    def _test_auto_schedule(self, host_index):
        """Run one auto-schedule pass for the agent at host_index and
        verify both the return value and the resulting bindings."""
        self.config(dhcp_agents_per_network=self.max_agents_per_network)
        scheduler = dhcp_agent_scheduler.ChanceScheduler()
        self.ctx = context.get_admin_context()
        msg = 'host_index = %s' % host_index
        # create dhcp agents
        hosts = ['%s-agent-%s' % (host_index, i)
                 for i in range(self.agent_count)]
        dhcp_agents = self._create_and_set_agents_down(hosts)
        # create networks
        self._networks = [
            network.Network(
                self.ctx,
                id=uuidutils.generate_uuid(),
                name='%s-network-%s' % (host_index, i))
            for i in range(self.network_count)
        ]
        for i in range(len(self._networks)):
            self._networks[i].create()
        network_ids = [net.id for net in self._networks]
        # pre schedule the networks to the agents defined in
        # self.hosted_networks before calling auto_schedule_network
        for agent, networks in self.hosted_networks.items():
            agent_index = self._extract_index(agent)
            for net in networks:
                net_index = self._extract_index(net)
                scheduler.resource_filter.bind(self.ctx,
                                               [dhcp_agents[agent_index]],
                                               network_ids[net_index])
        retval = scheduler.auto_schedule_networks(self, self.ctx,
                                                  hosts[host_index])
        self.assertEqual(self.expected_auto_schedule_return_value, retval,
                         message=msg)
        agent_id = dhcp_agents[host_index].id
        hosted_networks = self._get_hosted_networks_on_dhcp_agent(agent_id)
        hosted_net_names = [
            self._strip_host_index(net['name'])
            for net in network.Network.get_objects(
                self.ctx, id=hosted_networks)
        ]
        expected_hosted_networks = self.expected_hosted_networks['agent-%s' %
                                                                 host_index]
        self.assertItemsEqual(hosted_net_names, expected_hosted_networks, msg)
    def test_auto_schedule(self):
        # exercise auto-scheduling from the point of view of every host
        for i in range(self.agent_count):
            self._test_auto_schedule(i)
class TestAZAwareWeightScheduler(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
                                 agentschedulers_db.DhcpAgentSchedulerDbMixin,
                                 agents_db.AgentDbMixin,
                                 common_db_mixin.CommonDbMixin):
    """Test various scenarios for AZAwareWeightScheduler.schedule.
    az_count
        Number of AZs.
    network_az_hints
        Number of AZs in availability_zone_hints of the network.
    agent_count[each az]
        Number of dhcp agents (also number of hosts).
    max_agents_per_network
        Maximum DHCP Agents that can be scheduled for a network.
    scheduled_agent_count[each az]
        Number of agents the network has previously scheduled
    down_agent_count[each az]
        Number of dhcp agents which are down
    expected_scheduled_agent_count[each az]
        Number of scheduled agents the schedule() should return
        or 'None' if the schedule() cannot schedule the network.
    """
    scenarios = [
        ('Single hint, Single agent, Scheduled an agent of the specified AZ',
         dict(az_count=2,
              network_az_hints=1,
              agent_count=[1, 1],
              max_agents_per_network=1,
              scheduled_agent_count=[0, 0],
              down_agent_count=[0, 0],
              expected_scheduled_agent_count=[1, 0])),
        ('Multi hints, Multi agents Scheduled agents of the specified AZs',
         dict(az_count=3,
              network_az_hints=2,
              agent_count=[1, 1, 1],
              max_agents_per_network=2,
              scheduled_agent_count=[0, 0, 0],
              down_agent_count=[0, 0, 0],
              expected_scheduled_agent_count=[1, 1, 0])),
        ('Single hint, Multi agents, Scheduled agents of the specified AZ',
         dict(az_count=2,
              network_az_hints=1,
              agent_count=[2, 1],
              max_agents_per_network=2,
              scheduled_agent_count=[0, 0],
              down_agent_count=[0, 0],
              expected_scheduled_agent_count=[2, 0])),
        ('Multi hints, Multi agents, Only single AZ available',
         dict(az_count=2,
              network_az_hints=2,
              agent_count=[2, 1],
              max_agents_per_network=2,
              scheduled_agent_count=[0, 0],
              down_agent_count=[0, 1],
              expected_scheduled_agent_count=[2, 0])),
        ('Multi hints, Multi agents, Not enough agents',
         dict(az_count=3,
              network_az_hints=3,
              agent_count=[1, 1, 1],
              max_agents_per_network=3,
              scheduled_agent_count=[0, 0, 0],
              down_agent_count=[0, 1, 0],
              expected_scheduled_agent_count=[1, 0, 1])),
        ('Multi hints, Multi agents, Partially scheduled, Another AZ selected',
         dict(az_count=3,
              network_az_hints=2,
              agent_count=[1, 1, 1],
              max_agents_per_network=2,
              scheduled_agent_count=[1, 0, 0],
              down_agent_count=[0, 0, 0],
              expected_scheduled_agent_count=[0, 1, 0])),
        ('No hint, Scheduled independent to AZ',
         dict(az_count=3,
              network_az_hints=0,
              agent_count=[1, 1, 1],
              max_agents_per_network=3,
              scheduled_agent_count=[0, 0, 0],
              down_agent_count=[0, 0, 0],
              expected_scheduled_agent_count=[1, 1, 1])),
    ]
    def _set_network_az_hints(self):
        """Give the test network az0..azN-1 hints per the scenario."""
        self.network['availability_zone_hints'] = []
        for i in range(self.network_az_hints):
            self.network['availability_zone_hints'].append('az%s' % i)
    def test_schedule_network(self):
        self.config(dhcp_agents_per_network=self.max_agents_per_network)
        scheduler = dhcp_agent_scheduler.AZAwareWeightScheduler()
        self._set_network_az_hints()
        # create dhcp agents
        for i in range(self.az_count):
            az = 'az%s' % i
            hosts = ['%s-host-%s' % (az, j)
                     for j in range(self.agent_count[i])]
            dhcp_agents = self._create_and_set_agents_down(
                hosts, down_agent_count=self.down_agent_count[i], az=az)
            active_agents = dhcp_agents[self.down_agent_count[i]:]
            # schedule some agents before calling schedule
            if self.scheduled_agent_count[i]:
                # schedule the network
                schedule_agents = active_agents[:self.scheduled_agent_count[i]]
                scheduler.resource_filter.bind(
                    self.ctx, schedule_agents, self.network_id)
        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
                                                     self.network)
        # tally newly scheduled and total hosting agents per AZ
        scheduled_azs = collections.defaultdict(int)
        for agent in actual_scheduled_agents:
            scheduled_azs[agent['availability_zone']] += 1
        hosted_agents = self.list_dhcp_agents_hosting_network(
            self.ctx, self.network_id)
        hosted_azs = collections.defaultdict(int)
        for agent in hosted_agents['agents']:
            hosted_azs[agent['availability_zone']] += 1
        for i in range(self.az_count):
            self.assertEqual(self.expected_scheduled_agent_count[i],
                             scheduled_azs.get('az%s' % i, 0))
            self.assertEqual(self.scheduled_agent_count[i] +
                             scheduled_azs.get('az%s' % i, 0),
                             hosted_azs.get('az%s' % i, 0))
class TestDHCPSchedulerWithNetworkAccessibility(
        test_plugin.Ml2PluginV2TestCase):
    """Verify that physical-network reachability filters DHCP scheduling."""

    _mechanism_drivers = ['openvswitch']

    def test_dhcp_scheduler_filters_hosts_without_network_access(self):
        """Only agents on hosts mapped to the network's physnet get picked.

        Three DHCP agents are registered, but only host1's OVS agent has a
        bridge mapping for 'physnet1' - the physical network of the created
        VLAN network - so scheduling must select host1 alone.
        """
        dhcp_agent1 = helpers.register_dhcp_agent(host='host1')
        dhcp_agent2 = helpers.register_dhcp_agent(host='host2')
        dhcp_agent3 = helpers.register_dhcp_agent(host='host3')
        dhcp_agents = [dhcp_agent1, dhcp_agent2, dhcp_agent3]
        # only host1 can reach physnet1; host2/host3 map physnet2
        helpers.register_ovs_agent(
            host='host1', bridge_mappings={'physnet1': 'br-eth-1'})
        helpers.register_ovs_agent(
            host='host2', bridge_mappings={'physnet2': 'br-eth-1'})
        helpers.register_ovs_agent(
            host='host3', bridge_mappings={'physnet2': 'br-eth-1'})
        admin_context = context.get_admin_context()
        net = self.driver.create_network(
            admin_context,
            {'network': {'name': 'net1',
                         providernet.NETWORK_TYPE: 'vlan',
                         providernet.PHYSICAL_NETWORK: 'physnet1',
                         providernet.SEGMENTATION_ID: 1,
                         'tenant_id': 'tenant_one',
                         'admin_state_up': True,
                         'shared': True}})
        # a DHCP-enabled subnet is needed for scheduling to do anything
        self.driver.create_subnet(
            admin_context,
            {'subnet':
                {'name': 'name',
                 'ip_version': 4,
                 'network_id': net['id'],
                 'cidr': '10.0.0.0/24',
                 'gateway_ip': constants.ATTR_NOT_SPECIFIED,
                 'allocation_pools': constants.ATTR_NOT_SPECIFIED,
                 'dns_nameservers': constants.ATTR_NOT_SPECIFIED,
                 'host_routes': constants.ATTR_NOT_SPECIFIED,
                 'tenant_id': 'tenant_one',
                 'enable_dhcp': True}})

        self.plugin.schedule_network(admin_context, net)
        dhcp_agents = self.driver.get_dhcp_agents_hosting_networks(
            admin_context, [net['id']])
        self.assertEqual(1, len(dhcp_agents))
        self.assertEqual('host1', dhcp_agents[0]['host'])
| apache-2.0 |
guewen/OpenUpgrade | addons/stock_account/stock.py | 33 | 11505 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class stock_location_path(osv.osv):
    """Push rule: add an invoicing-control flag to stock.location.path."""
    _inherit = "stock.location.path"
    _columns = {
        # Controls whether moves generated through this path are invoiced.
        'invoice_state': fields.selection([
            ("invoiced", "Invoiced"),
            ("2binvoiced", "To Be Invoiced"),
            ("none", "Not Applicable")], "Invoice Status",
            required=True,),
    }
    _defaults = {
        'invoice_state': 'none',
    }
#----------------------------------------------------------
# Procurement Rule
#----------------------------------------------------------
class procurement_rule(osv.osv):
    """Add an invoicing-control flag to procurement rules."""
    _inherit = 'procurement.rule'
    _columns = {
        # Invoice status propagated to the stock moves created by the rule
        # (see procurement_order._run_move_create below).
        'invoice_state': fields.selection([
            ("invoiced", "Invoiced"),
            ("2binvoiced", "To Be Invoiced"),
            ("none", "Not Applicable")], "Invoice Status",
            required=True),
    }
    _defaults = {
        'invoice_state': 'none',
    }
#----------------------------------------------------------
# Procurement Order
#----------------------------------------------------------
class procurement_order(osv.osv):
    """Propagate the invoicing-control flag from rules to created moves."""
    _inherit = "procurement.order"
    _columns = {
        'invoice_state': fields.selection([("invoiced", "Invoiced"),
                                       ("2binvoiced", "To Be Invoiced"),
                                       ("none", "Not Applicable")
                                       ], "Invoice Control", required=True),
    }

    def _run_move_create(self, cr, uid, procurement, context=None):
        """Add ``invoice_state`` to the move values built by the parent.

        The rule's setting wins when it is meaningful (not 'none'/unset);
        otherwise the procurement's own state is used, with a final
        fallback to 'none' (old-style and/or idiom below).
        """
        res = super(procurement_order, self)._run_move_create(cr, uid, procurement, context=context)
        res.update({'invoice_state': (procurement.rule_id.invoice_state in ('none', False) and procurement.invoice_state or procurement.rule_id.invoice_state) or 'none'})
        return res

    _defaults = {
        'invoice_state': 'none'
    }
#----------------------------------------------------------
# Move
#----------------------------------------------------------
class stock_move(osv.osv):
    """Add invoicing control to moves plus helpers used when turning
    moves into invoice lines (see stock_picking below)."""
    _inherit = "stock.move"
    _columns = {
        'invoice_state': fields.selection([("invoiced", "Invoiced"),
                                          ("2binvoiced", "To Be Invoiced"),
                                          ("none", "Not Applicable")], "Invoice Control",
                                          select=True, required=True, track_visibility='onchange',
                                          states={'draft': [('readonly', False)]}),
    }
    _defaults = {
        'invoice_state': lambda *args, **argv: 'none'
    }

    def _get_master_data(self, cr, uid, move, company, context=None):
        ''' returns a tuple (browse_record(res.partner), ID(res.users), ID(res.currency))
        used as invoice grouping key data '''
        return move.picking_id.partner_id, uid, company.currency_id.id

    def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
        """Create one account.invoice.line from prepared values."""
        return self.pool.get('account.invoice.line').create(cr, uid, invoice_line_vals, context=context)

    def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
        """ Gets price unit for invoice
            @param move_line: Stock move lines
            @param type: Type of invoice
            @return: The price unit for the move line
        """
        if context is None:
            context = {}
        if type in ('in_invoice', 'in_refund'):
            # Take the user company and pricetype
            # NOTE: mutates the caller's context dict when one is passed in.
            context['currency_id'] = move_line.company_id.currency_id.id
            amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
            return amount_unit
        # customer invoices/refunds use the product's sale list price
        return move_line.product_id.list_price

    def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
        """Build the account.invoice.line values for one stock move.

        Picks the income/expense account (product first, category as
        fallback), maps it through the partner's fiscal position, and uses
        the move's UoS quantity when a unit of sale is set.
        """
        fp_obj = self.pool.get('account.fiscal.position')
        # Get account_id
        if inv_type in ('out_invoice', 'out_refund'):
            account_id = move.product_id.property_account_income.id
            if not account_id:
                account_id = move.product_id.categ_id.property_account_income_categ.id
        else:
            account_id = move.product_id.property_account_expense.id
            if not account_id:
                account_id = move.product_id.categ_id.property_account_expense_categ.id
        fiscal_position = partner.property_account_position
        account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)

        # set UoS if it's a sale and the picking doesn't have one
        uos_id = move.product_uom.id
        quantity = move.product_uom_qty
        if move.product_uos:
            uos_id = move.product_uos.id
            quantity = move.product_uos_qty
        return {
            'name': move.name,
            'account_id': account_id,
            'product_id': move.product_id.id,
            'uos_id': uos_id,
            'quantity': quantity,
            'price_unit': self._get_price_unit_invoice(cr, uid, move, inv_type),
            'discount': 0.0,
            'account_analytic_id': False,
        }
#----------------------------------------------------------
# Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
    """Derive an invoice-control state for pickings from their moves and
    provide the machinery that turns moves into customer/supplier
    invoices."""
    _inherit = 'stock.picking'

    def __get_invoice_state(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for ``invoice_state``.

        A picking is '2binvoiced' as soon as one move still has to be
        invoiced, 'invoiced' when at least one move was invoiced and none
        is pending, and 'none' otherwise.
        """
        result = {}
        for pick in self.browse(cr, uid, ids, context=context):
            result[pick.id] = 'none'
            for move in pick.move_lines:
                if move.invoice_state == 'invoiced':
                    result[pick.id] = 'invoiced'
                elif move.invoice_state == '2binvoiced':
                    result[pick.id] = '2binvoiced'
                    break
        return result

    def __get_picking_move(self, cr, uid, ids, context=None):
        """Store trigger: map changed move ids to their picking ids.

        Fix: the previous signature used a shared mutable default
        (``context={}``); use the conventional ``None`` default instead.
        """
        res = []
        for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
            if move.picking_id:
                res.append(move.picking_id.id)
        return res

    _columns = {
        'invoice_state': fields.function(__get_invoice_state, type='selection', selection=[
            ("invoiced", "Invoiced"),
            ("2binvoiced", "To Be Invoiced"),
            ("none", "Not Applicable")
          ], string="Invoice Control", required=True,
          store={
              'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['state'], 10),
              'stock.move': (__get_picking_move, ['picking_id', 'invoice_state'], 10),
          },
        ),
    }
    _defaults = {
        'invoice_state': lambda *args, **argv: 'none'
    }

    def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        ''' This function simply creates the invoice from the given values. It is overriden in delivery module to add the delivery costs.
        '''
        invoice_obj = self.pool.get('account.invoice')
        return invoice_obj.create(cr, uid, vals, context=context)

    def action_invoice_create(self, cr, uid, ids, journal_id, group=False, type='out_invoice', context=None):
        """ Creates invoice based on the invoice state selected for picking.
        @param journal_id: Id of journal
        @param group: Whether to create a group invoice or not
        @param type: Type invoice to be created
        @return: Ids of created invoices for the pickings
        """
        context = context or {}
        todo = {}
        for picking in self.browse(cr, uid, ids, context=context):
            # one batch per picking when grouping, otherwise a single batch
            key = group and picking.id or True
            for move in picking.move_lines:
                if move.procurement_id and (move.procurement_id.invoice_state == '2binvoiced') or move.invoice_state == '2binvoiced':
                    if (move.state != 'cancel') and not move.scrapped:
                        todo.setdefault(key, [])
                        todo[key].append(move)
        invoices = []
        for moves in todo.values():
            # Fix: accumulate the ids of every batch; the previous plain
            # assignment discarded all but the last batch's invoices.
            invoices += self._invoice_create_line(cr, uid, moves, journal_id, type, context=context)
        return invoices

    def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):
        """Build the invoice header values for one grouping ``key``
        (partner, currency, company, user)."""
        if context is None:
            context = {}
        partner, currency_id, company_id, user_id = key
        if inv_type in ('out_invoice', 'out_refund'):
            account_id = partner.property_account_receivable.id
            payment_term = partner.property_payment_term.id or False
        else:
            account_id = partner.property_account_payable.id
            payment_term = partner.property_supplier_payment_term.id or False
        return {
            'origin': origin,
            'date_invoice': context.get('date_inv', False),
            'user_id': user_id,
            'partner_id': partner.id,
            'account_id': account_id,
            'payment_term': payment_term,
            'type': inv_type,
            'fiscal_position': partner.property_account_position.id,
            'company_id': company_id,
            'currency_id': currency_id,
            'journal_id': journal_id,
        }

    def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None):
        """Create one invoice per (partner, currency, company, user) group
        and one invoice line per move; flag the moves and their
        procurements as invoiced.

        @return: the ids of the invoices created
        """
        invoice_obj = self.pool.get('account.invoice')
        move_obj = self.pool.get('stock.move')
        invoices = {}
        for move in moves:
            company = move.company_id
            origin = move.picking_id.name
            partner, user_id, currency_id = move_obj._get_master_data(cr, uid, move, company, context=context)

            key = (partner, currency_id, company.id, user_id)

            if key not in invoices:
                # Get account and payment terms
                invoice_vals = self._get_invoice_vals(cr, uid, key, inv_type, journal_id, origin, context=context)
                invoice_id = self._create_invoice_from_picking(cr, uid, move.picking_id, invoice_vals, context=context)
                invoices[key] = invoice_id

            invoice_line_vals = move_obj._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
            invoice_line_vals['invoice_id'] = invoices[key]
            invoice_line_vals['origin'] = origin

            move_obj._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
            move_obj.write(cr, uid, move.id, {'invoice_state': 'invoiced'}, context=context)
            if move.procurement_id:
                self.pool.get('procurement.order').write(cr, uid, [move.procurement_id.id], {
                    'invoice_state': 'invoiced',
                }, context=context)
        # supplier invoices get their totals recomputed from the lines
        invoice_obj.button_compute(cr, uid, invoices.values(), context=context, set_total=(inv_type in ('in_invoice', 'in_refund')))
        return invoices.values()
| agpl-3.0 |
petrus-v/odoo | addons/mrp/report/mrp_report.py | 341 | 3839 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class report_workcenter_load(osv.osv):
    """Read-only reporting model backed by the SQL view created in
    init(): planned hours/cycles aggregated per work center and date."""
    _name="report.workcenter.load"
    _description="Work Center Load"
    # backed by a database view, not a regular table
    _auto = False
    _log_access = False
    _columns = {
        # NOTE(review): labelled "Week" but the view groups by day
        # ('YYYY:mm:dd') - confirm the intended granularity.
        'name': fields.char('Week', required=True),
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
        'cycle': fields.float('Number of Cycles'),
        'hour': fields.float('Number of Hours'),
    }

    def init(self, cr):
        # (Re)create the view: sum the work-order lines' hours and cycles
        # per work center and per planned date of their production order.
        cr.execute("""
            create or replace view report_workcenter_load as (
                SELECT
                    min(wl.id) as id,
                    to_char(p.date_planned,'YYYY:mm:dd') as name,
                    SUM(wl.hour) AS hour,
                    SUM(wl.cycle) AS cycle,
                    wl.workcenter_id as workcenter_id
                FROM
                    mrp_production_workcenter_line wl
                    LEFT JOIN mrp_production p
                        ON p.id = wl.production_id
                GROUP BY
                    wl.workcenter_id,
                    to_char(p.date_planned,'YYYY:mm:dd')
            )""")
class report_mrp_inout(osv.osv):
    """Read-only reporting model backed by the SQL view created in
    init(): weekly stock value variation per company."""
    _name="report.mrp.inout"
    _description="Stock value variation"
    # backed by a database view, not a regular table
    _auto = False
    _log_access = False
    _rec_name = 'date'
    _columns = {
        'date': fields.char('Week', required=True),
        'value': fields.float('Stock value', required=True, digits=(16,2)),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }

    def init(self, cr):
        # (Re)create the view: for each done move, count its value as
        # positive when it enters an internal location and negative when
        # it leaves one, summed per ISO week and company.
        cr.execute("""
            create or replace view report_mrp_inout as (
                select
                    min(sm.id) as id,
                    to_char(sm.date,'YYYY:IW') as date,
                    sum(case when (sl.usage='internal') then
                        sm.price_unit * sm.product_qty
                    else
                        0.0
                    end - case when (sl2.usage='internal') then
                        sm.price_unit * sm.product_qty
                    else
                        0.0
                    end) as value,
                    sm.company_id
                from
                    stock_move sm
                    left join product_product pp
                        on (pp.id = sm.product_id)
                    left join product_template pt
                        on (pt.id = pp.product_tmpl_id)
                    left join stock_location sl
                        on ( sl.id = sm.location_id)
                    left join stock_location sl2
                        on ( sl2.id = sm.location_dest_id)
                where
                    sm.state = 'done'
                group by
                    to_char(sm.date,'YYYY:IW'), sm.company_id
            )""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TUW-GEO/SMDC-performance | tests/test_test_cases.py | 1 | 11151 | # Copyright (c) 2013,Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology,
# Department of Geodesy and Geoinformation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Tests basic functionality of the test_cases module
Created on Thu Nov 20 13:38:43 2014
@author: Christoph.Paulik@geo.tuwien.ac.at
'''
import smdc_perftests.performance_tests.test_cases as test_cases
import smdc_perftests.helper as helper
import datetime as dt
import time
import math
import pytest
from .fixtures import tempdir
class FakeDataset(object):
    """
    Fake dataset providing no-op time-series, image and cell readers so
    the test-case helpers can be exercised without real data.

    Every read sleeps for ``sleep_time`` seconds (so the timing
    decorators have something to measure) and bumps a per-method counter.
    """

    def __init__(self, sleep_time=0.0001):
        # fix: removed a stray dead ``pass`` that preceded the real body
        # counters for how often each reader was called
        self.ts_read = 0
        self.img_read = 0
        self.cells_read = 0
        # artificial duration of a single read
        self.sleep_time = sleep_time

    def get_timeseries(self, gpi, date_start=None, date_end=None):
        """Pretend to read the time series of one grid point."""
        time.sleep(self.sleep_time)
        self.ts_read += 1
        return None

    def get_avg_image(self, date_start, date_end=None, cell_id=None):
        """
        Pretend to read an averaged image.

        Image readers generally return more than one variable; the five
        None values mimic that and do not matter for these tests.
        """
        time.sleep(self.sleep_time)
        assert type(date_start) == dt.datetime
        self.img_read += 1
        return None, None, None, None, None

    def get_data(self, date_start, date_end, cell_id):
        """
        Pretend to read all the data of one cell for a period.

        Like the image reader, the five None values mimic a multi-variable
        return that these tests ignore.
        """
        time.sleep(self.sleep_time)
        assert type(date_start) == dt.datetime
        assert type(date_end) == dt.datetime
        self.cells_read += 1
        return None, None, None, None, None
def test_measure_output_format():
    """The measure decorator must report one measurement per run."""
    @test_cases.measure('test_output_format', runs=3)
    def timed_sleep():
        time.sleep(0.5)

    results = timed_sleep()

    assert results.n == 3
    assert len(results.confidence_int()) == 3
def test_run_rand_by_gpi_list():
    """
    Read a random subset of a 10000-gpi dataset three times and check
    the number of time series reads recorded by the fake dataset.
    """
    dataset = FakeDataset()

    # grid point indices would normally come from a grid object or sciDB;
    # the helper reads a random 1% of them per run
    gpi_list = range(10000)

    @test_cases.measure('test_rand_gpi', runs=3)
    def run_test():
        test_cases.read_rand_ts_by_gpi_list(dataset, gpi_list)

    run_test()

    # 3 runs, each reading 1% of 10000 gpis
    assert dataset.ts_read == 10000 * 0.01 * 3
def test_run_rand_by_date_list():
    """
    Read a random subset of one year of daily images three times and
    check the number of image reads recorded by the fake dataset.
    """
    dataset = FakeDataset()

    # one year of daily dates; the helper reads a random 1% of them
    date_list = [dt.datetime(2007, 1, 1) + dt.timedelta(days=offset)
                 for offset in range(365)]

    @test_cases.measure('test_rand_date', runs=3)
    def run_test():
        test_cases.read_rand_img_by_date_list(dataset, date_list)

    run_test()

    assert dataset.img_read == math.ceil(365 * 0.01) * 3
def test_run_rand_by_cell_list():
    """
    Read a random subset of 500 cells three times and check the number
    of cell reads recorded by the fake dataset.
    """
    dataset = FakeDataset()

    cell_list = range(500)
    date_list = helper.generate_date_list(dt.datetime(2007, 1, 1),
                                          dt.datetime(2008, 1, 1),
                                          n=len(cell_list),
                                          max_spread=5, min_spread=5)

    @test_cases.measure('test_rand_cells', runs=3)
    def run_test():
        test_cases.read_rand_cells_by_cell_list(dataset, date_list, cell_list)

    run_test()

    assert dataset.cells_read == 500 * 0.01 * 3
def test_results_comparison():
    """Check the ordering operators of the TestResults class."""
    slower = test_cases.TestResults(
        [5.8, 6.3, 6.2, 5.2, 4.3, 6.1, 4.2, 5.5], 'list1')
    faster = test_cases.TestResults([6.7, 8.3, 9.4, 7.3, 8.5], 'list2')
    shorter = test_cases.TestResults([6.7, 8.3, 9.4, 7.3], 'list3')

    assert slower < faster
    assert faster > slower
    # comparisons against the shorter measurement set must not hold
    assert not slower < shorter
    assert not shorter > faster
def test_TestResults_init():
    """Constructing TestResults from a single measurement raises ValueError."""
    with pytest.raises(ValueError):
        test_cases.TestResults([1])
def test_to_netcdf(tempdir):
    """
    Round-trip a TestResults object through a netCDF file and check the
    measurements and the name survive.
    """
    measurements = [5.8, 6.3, 6.2, 5.2, 4.3, 6.1, 4.2, 5.5]
    written = test_cases.TestResults(measurements, 'list1')
    written.to_nc("test.nc")

    restored = test_cases.TestResults("test.nc")

    assert written._measurements == restored._measurements
    assert written.name == restored.name
def test_self_timing_dataset():
    """Smoke test: a SelfTimingDataset wrapper forwards get_timeseries."""
    timed = test_cases.SelfTimingDataset(FakeDataset())
    timed.get_timeseries(12)
def test_run_rand_by_gpi_list_self_timing():
    """
    Same as the plain gpi-list run but through a SelfTimingDataset:
    every forwarded read must leave one timing measurement behind.
    """
    timed = test_cases.SelfTimingDataset(FakeDataset())

    # grid point indices would normally come from a grid object or sciDB
    gpi_list = range(10000)

    @test_cases.measure('test_rand_gpi', runs=3)
    def run_test():
        test_cases.read_rand_ts_by_gpi_list(timed, gpi_list)

    run_test()

    # 3 runs, each reading 1% of 10000 gpis, each read timed once
    assert timed.ts_read == 10000 * 0.01 * 3
    assert len(timed.measurements['get_timeseries']) == 300
def test_run_rand_by_gpi_list_self_timing_max_runtime():
    """
    With a max_runtime limit and a slow dataset only part of the random
    gpi subset can be read before each run is cut off.
    """
    timed = test_cases.SelfTimingDataset(FakeDataset(sleep_time=0.01))

    # grid point indices would normally come from a grid object or sciDB
    gpi_list = range(10000)

    @test_cases.measure('test_rand_gpi', runs=3)
    def run_test():
        test_cases.read_rand_ts_by_gpi_list(timed, gpi_list, max_runtime=0.5)

    run_test()

    # at most half of the unrestricted 300 reads fit into the time budget
    assert timed.ts_read <= (10000 * 0.01 * 3) / 2
    assert len(timed.measurements['get_timeseries']) <= 150
def test_run_rand_by_date_list_self_timing():
    """
    Same as the plain date-list run but through a SelfTimingDataset:
    every forwarded image read must leave one timing measurement behind.
    """
    timed = test_cases.SelfTimingDataset(FakeDataset())

    # one year of daily dates; the helper reads a random 1% of them
    date_list = [dt.datetime(2007, 1, 1) + dt.timedelta(days=offset)
                 for offset in range(365)]

    @test_cases.measure('test_rand_date', runs=3)
    def run_test():
        test_cases.read_rand_img_by_date_list(timed, date_list)

    run_test()

    assert timed.img_read == math.ceil(365 * 0.01) * 3
    assert len(timed.measurements['get_avg_image']) == math.ceil(365 * 0.01) * 3
def test_run_rand_by_date_list_self_timing_max_runtime():
    """
    With a max_runtime limit and a slow dataset only a fixed number of
    images fit into the time budget of the three runs.
    """
    timed = test_cases.SelfTimingDataset(FakeDataset(sleep_time=0.1))

    # one year of daily dates to sample images from
    date_list = [dt.datetime(2007, 1, 1) + dt.timedelta(days=offset)
                 for offset in range(365)]

    @test_cases.measure('test_rand_date', runs=3)
    def run_test():
        test_cases.read_rand_img_by_date_list(timed, date_list, max_runtime=0.3)

    run_test()

    assert timed.img_read == 9
    assert len(timed.measurements['get_avg_image']) == 9
def test_run_rand_by_cell_list_self_timing():
    """
    Same as the plain cell-list run but through a SelfTimingDataset:
    every forwarded cell read must leave one timing measurement behind.
    """
    timed = test_cases.SelfTimingDataset(FakeDataset())

    cell_list = range(500)
    date_list = helper.generate_date_list(dt.datetime(2007, 1, 1),
                                          dt.datetime(2008, 1, 1),
                                          n=len(cell_list),
                                          max_spread=5, min_spread=5)

    @test_cases.measure('test_rand_cells', runs=3)
    def run_test():
        test_cases.read_rand_cells_by_cell_list(timed, date_list, cell_list)

    run_test()

    assert timed.cells_read == 500 * 0.01 * 3
    assert len(timed.measurements['get_data']) == timed.cells_read
def test_run_rand_by_cell_list_self_timing_max_runtime():
    """
    Cell-list run through a SelfTimingDataset with a restricted
    max_runtime: only a fixed number of cells fit into the time budget.
    """
    timed = test_cases.SelfTimingDataset(FakeDataset(sleep_time=0.1))

    cell_list = range(500)
    date_list = helper.generate_date_list(dt.datetime(2007, 1, 1),
                                          dt.datetime(2008, 1, 1),
                                          n=len(cell_list),
                                          max_spread=5, min_spread=5)

    @test_cases.measure('test_rand_cells', runs=3)
    def run_test():
        test_cases.read_rand_cells_by_cell_list(timed, date_list, cell_list,
                                                max_runtime=0.4)

    run_test()

    assert timed.cells_read == 12
    assert len(timed.measurements['get_data']) == timed.cells_read
if __name__ == '__main__':
    # Run a single test directly when the module is executed as a script
    # (the full suite is normally driven by the test runner).
    test_self_timing_dataset()
| bsd-3-clause |
mnuthan1/workflow | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table; only the first JIS_TABLE_SIZE entries are
# relevant for detection (see the "no interest for detection" note below)
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| apache-2.0 |
Tithen-Firion/youtube-dl | youtube_dl/extractor/europa.py | 58 | 3417 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
qualities,
unified_strdate,
xpath_text
)
class EuropaIE(InfoExtractor):
    """Extractor for media hosted on the European Commission AV portal."""

    _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
    _TESTS = [{
        'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
        'md5': '574f080699ddd1e19a675b0ddf010371',
        'info_dict': {
            'id': 'I107758',
            'ext': 'mp4',
            'title': 'TRADE - Wikileaks on TTIP',
            'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20150811',
            'duration': 34,
            'view_count': int,
            'formats': 'mincount:3',
        }
    }, {
        'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
        'only_matching': True,
    }, {
        'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        playlist = self._download_xml(
            'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)

        def pick_localized(node_type, lang_order):
            # Collect every <item> label for this metadata node keyed by its
            # language code, then return the first non-empty label following
            # the caller's language preference order (None when nothing fits).
            labels = {}
            for node in playlist.findall('./info/%s/item' % node_type):
                lang = xpath_text(node, 'lg', default=None)
                label = xpath_text(node, 'label', default=None)
                if lang and label:
                    labels[lang] = label.strip()
            return next((labels[lg] for lg in lang_order if labels.get(lg)), None)

        query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        preferred_lang = query.get('sitelang', ('en', ))[0]
        # Fall back to English and then the "int" (international) track;
        # orderedSet also removes duplicates while keeping order.
        preferred_langs = orderedSet((preferred_lang, 'en', 'int'))

        title = pick_localized('title', preferred_langs) or video_id
        description = pick_localized('description', preferred_langs)
        thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
        upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
        duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
        view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))

        # Earlier entries in preferred_langs get a higher preference value.
        language_preference = qualities(preferred_langs[::-1])

        formats = []
        for file_node in playlist.findall('./files/file'):
            media_url = xpath_text(file_node, './url')
            if not media_url:
                continue
            lang = xpath_text(file_node, './lg')
            formats.append({
                'url': media_url,
                'format_id': lang,
                'format_note': xpath_text(file_node, './lglabel'),
                'language_preference': language_preference(lang)
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'formats': formats
        }
| unlicense |
qedi-r/home-assistant | homeassistant/components/opentherm_gw/sensor.py | 1 | 2793 | """Support for OpenTherm Gateway sensors."""
import logging
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.const import CONF_ID
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from .const import DATA_GATEWAYS, DATA_OPENTHERM_GW, SENSOR_INFO
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the OpenTherm Gateway sensors."""
    gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]]
    # Each SENSOR_INFO entry holds (device_class, unit, friendly-name format),
    # indexed 0..2, for one gateway status variable.
    entities = [
        OpenThermSensor(gw_dev, var, info[0], info[1], info[2])
        for var, info in SENSOR_INFO.items()
    ]
    async_add_entities(entities)
class OpenThermSensor(Entity):
    """One OpenTherm Gateway status variable exposed as a sensor entity."""

    def __init__(self, gw_dev, var, device_class, unit, friendly_name_format):
        """Initialize the OpenTherm Gateway sensor."""
        self._gateway = gw_dev
        self._var = var
        self._value = None
        self._device_class = device_class
        self._unit = unit
        self._friendly_name = friendly_name_format.format(gw_dev.name)
        # Entity id combines the status variable name with the gateway id.
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, f"{var}_{gw_dev.gw_id}", hass=gw_dev.hass
        )

    async def async_added_to_hass(self):
        """Subscribe to status updates from the gateway component."""
        _LOGGER.debug("Added OpenTherm Gateway sensor %s", self._friendly_name)
        async_dispatcher_connect(
            self.hass, self._gateway.update_signal, self.receive_report
        )

    @callback
    def receive_report(self, status):
        """Store this variable's latest value and push the new state."""
        raw = status.get(self._var)
        # Floats are rendered with one decimal place; other values pass through.
        self._value = f"{raw:2.1f}" if isinstance(raw, float) else raw
        self.async_schedule_update_ha_state()

    @property
    def name(self):
        """Return the friendly name of the sensor."""
        return self._friendly_name

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def state(self):
        """Return the current value of the sensor."""
        return self._value

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit

    @property
    def should_poll(self):
        """Return False: state is pushed via the dispatcher, not polled."""
        return False
| apache-2.0 |
jobiols/odoomrp-wip | mrp_production_add_middle_stuff/__openerp__.py | 6 | 1542 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
# Odoo 8.0 addon manifest (__openerp__.py): a bare dict literal that the
# module loader evaluates to discover this addon's metadata and data files.
{
    "name": "MRP Production add middle stuff",
    # Convention appears to be <Odoo series>.<module version> — TODO confirm.
    "version": "8.0.1.0.0",
    "category": "Manufacturing",
    "license": "AGPL-3",
    # Adjacent string literals are concatenated into one author string.
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "contributors": [
        "Daniel Campos <danielcampos@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <anajuaristi@avanzosc.es>",
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
    ],
    "website": "http://www.odoomrp.com",
    # Only the core manufacturing module is required.
    "depends": ["mrp"],
    # XML files loaded on install/update: wizard definition and view changes.
    "data": ["wizard/addition_wizard_view.xml",
             "views/mrp_production_view.xml"],
    "installable": True,
}
| agpl-3.0 |
eyohansa/django | tests/order_with_respect_to/tests.py | 137 | 4286 | from __future__ import unicode_literals
from operator import attrgetter
from django.db import models
from django.test import TestCase
from .models import Answer, Post, Question
class OrderWithRespectToTests(TestCase):
    """Tests for ``Meta.order_with_respect_to`` via the Question/Answer models.

    The fixture relies on exact insertion order: the ordering feature under
    test records the order in which related objects are created.
    """

    # Hook to allow subclasses to run these tests with alternate models.
    Answer = Answer
    Question = Question

    @classmethod
    def setUpTestData(cls):
        # Shared fixture: one question with four answers created in a fixed,
        # known order (John, Paul, George, Ringo).
        cls.q1 = cls.Question.objects.create(text="Which Beatle starts with the letter 'R'?")
        cls.Answer.objects.create(text="John", question=cls.q1)
        cls.Answer.objects.create(text="Paul", question=cls.q1)
        cls.Answer.objects.create(text="George", question=cls.q1)
        cls.Answer.objects.create(text="Ringo", question=cls.q1)

    def test_default_to_insertion_order(self):
        # Answers will always be ordered in the order they were inserted.
        self.assertQuerysetEqual(
            self.q1.answer_set.all(), [
                "John", "Paul", "George", "Ringo",
            ],
            attrgetter("text"),
        )

    def test_previous_and_next_in_order(self):
        # We can retrieve the answers related to a particular object, in the
        # order they were created, once we have a particular object.
        a1 = self.q1.answer_set.all()[0]
        self.assertEqual(a1.text, "John")
        self.assertEqual(a1.get_next_in_order().text, "Paul")
        a2 = list(self.q1.answer_set.all())[-1]
        self.assertEqual(a2.text, "Ringo")
        self.assertEqual(a2.get_previous_in_order().text, "George")

    def test_item_ordering(self):
        # We can retrieve the ordering of the queryset from a particular item.
        a1 = self.q1.answer_set.all()[1]
        id_list = [o.pk for o in self.q1.answer_set.all()]
        self.assertSequenceEqual(a1.question.get_answer_order(), id_list)
        # It doesn't matter which answer we use to check the order, it will
        # always be the same.
        a2 = self.Answer.objects.create(text="Number five", question=self.q1)
        self.assertListEqual(
            list(a1.question.get_answer_order()), list(a2.question.get_answer_order())
        )

    def test_change_ordering(self):
        # The ordering can be altered
        a = self.Answer.objects.create(text="Number five", question=self.q1)
        # Swap the last two items in the order list
        id_list = [o.pk for o in self.q1.answer_set.all()]
        x = id_list.pop()
        id_list.insert(-1, x)
        # By default, the ordering is different from the swapped version
        self.assertNotEqual(list(a.question.get_answer_order()), id_list)
        # Change the ordering to the swapped version -
        # this changes the ordering of the queryset.
        a.question.set_answer_order(id_list)
        self.assertQuerysetEqual(
            self.q1.answer_set.all(), [
                "John", "Paul", "George", "Number five", "Ringo"
            ],
            attrgetter("text")
        )
class OrderWithRespectToTests2(TestCase):
    """Further ``order_with_respect_to`` tests using the Post model."""

    # Provide the Post model as a class attribute so that we can subclass this
    # test case in contenttypes_tests.test_order_with_respect_to and run these
    # tests with alternative implementations of Post.
    Post = Post

    def test_recursive_ordering(self):
        # Ordering works when the "respect to" field is a self-referencing FK:
        # only siblings under the same parent participate in one order list.
        root = self.Post.objects.create(title="1")
        other = self.Post.objects.create(title="2")
        child_a = self.Post.objects.create(title="1.1", parent=root)
        child_b = self.Post.objects.create(title="1.2", parent=root)
        self.Post.objects.create(title="2.1", parent=other)
        child_c = self.Post.objects.create(title="1.3", parent=root)
        self.assertSequenceEqual(
            root.get_post_order(), [child_a.pk, child_b.pk, child_c.pk]
        )

    def test_duplicate_order_field(self):
        class Bar(models.Model):
            class Meta:
                app_label = 'order_with_respect_to'

        class Foo(models.Model):
            bar = models.ForeignKey(Bar, models.CASCADE)
            order = models.OrderWrt()

            class Meta:
                order_with_respect_to = 'bar'
                app_label = 'order_with_respect_to'

        # Exactly one implicit OrderWrt field must exist even though the model
        # also declares a field named "order" explicitly.
        order_wrt_fields = [
            f for f in Foo._meta.local_fields if isinstance(f, models.OrderWrt)
        ]
        self.assertEqual(len(order_wrt_fields), 1)
Moriadry/tensorflow | tensorflow/python/kernel_tests/atrous_conv2d_test.py | 139 | 9770 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _upsample_filters(filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: [h, w, in_depth, out_depth]. Original filters.
rate: An int, specifying the upsampling rate.
Returns:
filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
h_up = h + (h - 1) * (rate - 1)
w_up = w + (w - 1) * (rate - 1)
containing (rate - 1) zeros between consecutive filter values along
the filters' spatial dimensions.
"""
if rate == 1:
return filters
# [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
filters_up = np.transpose(filters, [2, 3, 0, 1])
ker = np.zeros([rate, rate], dtype=np.float32)
ker[0, 0] = 1
filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
return filters_up
class AtrousConv2DTest(test.TestCase):
  """Checks atrous_conv2d against equivalent dense convolution pipelines."""
  def testAtrousConv2DForward(self):
    """atrous_conv2d must equal conv2d applied to zero-upsampled filters."""
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)
              for padding in ["SAME", "VALID"]:
                y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
                y2 = nn_ops.conv2d(
                    x, f_up, strides=[1, 1, 1, 1], padding=padding)
                # Loose tolerances: float32 accumulation order differs
                # between the two code paths.
                self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
  def testAtrousSequence(self):
    """Tests optimization of sequence of atrous convolutions.
    Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
    parameters, 'SAME' `padding`, and `filters` with odd heights/ widths:
    net = atrous_conv2d(net, filters1, rate, padding="SAME")
    net = atrous_conv2d(net, filters2, rate, padding="SAME")
    ...
    net = atrous_conv2d(net, filtersK, rate, padding="SAME")
    is equivalent to:
    pad = ...  # padding so that the input dims are multiples of rate
    net = space_to_batch(net, paddings=pad, block_size=rate)
    net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
    net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
    ...
    net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
    net = batch_to_space(net, crops=pad, block_size=rate)
    """
    padding = "SAME"  # The padding needs to be "SAME"
    np.random.seed(1)  # Make it reproducible.
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      for height in range(15, 17):
        for width in range(15, 17):
          x_shape = [3, height, width, 2]
          x = np.random.random_sample(x_shape).astype(np.float32)
          for kernel in [1, 3, 5]:  # The kernel size needs to be odd.
            # Filter: [kernel_height, kernel_width, input_depth, output_depth]
            f_shape = [kernel, kernel, 2, 2]
            # Small filter values keep the products of three convolutions
            # numerically well-behaved for the comparison below.
            f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
            for rate in range(2, 4):
              # y1: three atrous_conv2d in a row.
              y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
              y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
              y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
              # y2: space_to_batch, three conv2d in a row, batch_to_space
              pad_bottom = 0 if height % rate == 0 else rate - height % rate
              pad_right = 0 if width % rate == 0 else rate - width % rate
              pad = [[0, pad_bottom], [0, pad_right]]
              y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
              y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
              self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
  def testGradient(self):
    """Numerical gradient check of atrous_conv2d w.r.t. input and filter."""
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      x_shape = [2, 5, 6, 2]
      # Filter: [kernel_height, kernel_width, input_depth, output_depth]
      f_shape = [3, 3, 2, 2]
      # Output: [batch, height, width, output_depth]
      y_shape = [2, 5, 6, 2]
      np.random.seed(1)  # Make it reproducible.
      x_val = np.random.random_sample(x_shape).astype(np.float32)
      f_val = np.random.random_sample(f_shape).astype(np.float32)
      x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
      f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
      for rate in range(1, 4):
        output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
        err = gradient_checker.compute_gradient_error([x, f],
                                                      [x_shape, f_shape],
                                                      output, y_shape)
        print("atrous_conv2d gradient err = %g " % err)
        err_tolerance = 1e-3
        self.assertLess(err, err_tolerance)
class AtrousConv2DTransposeTest(test.TestCase):
  """Checks atrous_conv2d_transpose against conv2d_transpose."""
  def testAtrousConv2DTransposeForward(self):
    """atrous transpose conv must equal conv2d_transpose with upsampled
    filters."""
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)
              # Effective (dilated) kernel extents after upsampling.
              kernel_height_up = (kernel_height + (kernel_height - 1) *
                                  (rate - 1))
              kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)
              for padding in ["SAME", "VALID"]:
                if padding == "SAME":
                  y_shape = [2, height, width, 2]
                else:
                  # VALID transpose conv grows the output by the dilated
                  # kernel extent minus one.
                  y_shape = [
                      2, height + kernel_height_up - 1,
                      width + kernel_width_up - 1, 2
                  ]
                y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
                                                    padding)
                y2 = nn_ops.conv2d_transpose(
                    x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
                self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
class AtrousDepthwiseConv2DTest(test.TestCase):
  """Checks depthwise_conv2d with a dilation rate against upsampled filters."""
  def testAtrousDepthwiseConv2DForward(self):
    """depthwise_conv2d(rate) must equal depthwise_conv2d with zero-upsampled
    filters."""
    strides = [1, 1, 1, 1]
    with self.test_session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)
              for padding in ["SAME", "VALID"]:
                y1 = nn_impl.depthwise_conv2d(
                    x, f, strides, padding, rate=[rate, rate])
                y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
                self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
# Run all test cases above when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
ethereum/pyrlp | tests/test_benchmark.py | 1 | 2832 | from itertools import repeat, chain
import sys
import pytest
import rlp
from rlp.sedes import binary, CountableList
from rlp.exceptions import DecodingError, DeserializationError
try:
import pytest_benchmark # noqa: F401
except ImportError:
do_benchmark = False
else:
do_benchmark = True
# speed up setup in case tests aren't run anyway
# SIZE controls how large the generated benchmark payloads are.
if do_benchmark:
    SIZE = int(1e6)
else:
    SIZE = 1
class Message(rlp.Serializable):
    """Example RLP-serializable object used as the benchmark payload."""
    fields = [
        ('field1', binary),
        ('field2', binary),
        ('field3', CountableList(binary, max_length=100))
    ]
def lazy_test_factory(s, valid):
    """Build a pytest benchmark test that lazily RLP-decodes ``s``.

    The produced test passes when deserialization succeeds exactly if
    ``valid`` is true, and when it raises exactly if ``valid`` is false.
    """
    @pytest.mark.benchmark(group='lazy')
    def benchmark_test(benchmark):
        @benchmark
        def outcome():
            try:
                Message.deserialize(rlp.decode_lazy(s))
            except (DecodingError, DeserializationError):
                return not valid
            return valid
        assert outcome
    return benchmark_test
def eager_test_factory(s, valid):
    """Build a pytest benchmark test that eagerly RLP-decodes ``s``.

    The produced test passes when decoding succeeds exactly if ``valid``
    is true, and when it raises exactly if ``valid`` is false.
    """
    @pytest.mark.benchmark(group='eager')
    def benchmark_test(benchmark):
        @benchmark
        def outcome():
            try:
                rlp.decode(s, Message)
            except (DecodingError, DeserializationError):
                return not valid
            return valid
        assert outcome
    return benchmark_test
def generate_test_functions():
    """Create eager and lazy benchmark tests and attach them to this module.

    Builds a set of valid and invalid RLP payloads of size SIZE and, for each,
    installs `test_eager_<name>` and `test_lazy_<name>` functions via setattr
    so pytest collects them as regular tests.
    """
    valid = {}
    invalid = {}
    # A long flat byte string and its per-byte RLP list encoding; both are
    # invalid as Message payloads (wrong structure).
    long_string = bytes(bytearray((i % 256 for i in range(SIZE))))
    long_list = rlp.encode([c for c in long_string])
    invalid['long_string'] = long_string
    invalid['long_list'] = long_list
    # Deeply nested list: repeatedly wrap the payload in a new list prefix.
    nested_list = rlp.encode(b'\x00')
    for _ in repeat(None, SIZE):
        nested_list += rlp.codec.length_prefix(len(nested_list), 0xc0)
    invalid['nested_list'] = nested_list
    valid['long_string_object'] = rlp.encode([b'\x00', long_string, []])
    # Hand-built list prefix covering two single-byte items plus long_list.
    prefix = rlp.codec.length_prefix(1 + 1 + len(long_list), 0xc0)
    invalid['long_list_object'] = prefix + rlp.encode(b'\x00') + rlp.encode(b'\x00') + long_list
    valid['friendly'] = rlp.encode(Message(
        b'hello',
        b"I'm friendly",
        [b'not', b'many', b'elements'],
    ))
    invalid = invalid.items()
    valid = valid.items()
    # Order matters: valid payloads first, matching the True/False flags below.
    rlp_strings = [i[1] for i in chain(valid, invalid)]
    valids = [True] * len(valid) + [False] * len(invalid)
    names = [i[0] for i in chain(valid, invalid)]
    current_module = sys.modules[__name__]
    # NOTE: the loop variable `valid` shadows the dict-items above; harmless
    # here because all derived lists are already computed.
    for rlp_string, valid, name in zip(rlp_strings, valids, names):
        f_eager = pytest.mark.skipif('not do_benchmark')(eager_test_factory(rlp_string, valid))
        f_lazy = pytest.mark.skipif('not do_benchmark')(lazy_test_factory(rlp_string, valid))
        setattr(current_module, 'test_eager_' + name, f_eager)
        setattr(current_module, 'test_lazy_' + name, f_lazy)
# Build the test functions at import time so pytest can collect them.
generate_test_functions()
| mit |
prakritish/ansible | lib/ansible/modules/cloud/openstack/os_client_config.py | 77 | 2503 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds
required: false
default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
- name: Get list of clouds that do not support security groups
os_client_config:
- debug:
var: "{{ item }}"
with_items: "{{ openstack.clouds | rejectattr('secgroup_source', 'none') | list }}"
- name: Get the information back just about the mordred cloud
os_client_config:
clouds:
- mordred
'''
import os_client_config
from os_client_config import exceptions
def main():
    """Ansible entry point: expose os-client-config cloud data as facts."""
    module = AnsibleModule(argument_spec=dict(
        clouds=dict(required=False, type='list', default=[]),
    ))
    wanted = module.params['clouds']
    try:
        os_config = os_client_config.OpenStackConfig()
        cloud_configs = []
        for cloud in os_config.get_all_clouds():
            # An empty `clouds` list means "return every configured cloud".
            if wanted and cloud.name not in wanted:
                continue
            cloud.config['name'] = cloud.name
            cloud_configs.append(cloud.config)
        module.exit_json(
            ansible_facts=dict(openstack=dict(clouds=cloud_configs)))
    except exceptions.OpenStackConfigException as e:
        module.fail_json(msg=str(e))
# import module snippets
# NOTE: this wildcard import is the legacy Ansible convention for pulling in
# AnsibleModule and helper utilities; it must stay at the bottom of the file.
from ansible.module_utils.basic import *
if __name__ == "__main__":
    main()
| gpl-3.0 |
sqall01/lightweight-push | lightweight_push.py | 1 | 11821 | #!/usr/bin/python
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the MIT License.
import time
import logging
import optparse
import sys
from lightweightpush import LightweightPush, ErrorCodes
################ GLOBAL CONFIGURATION DATA ################
# NOTE: the values below are placeholders; replace them with your own data
# and avoid committing real credentials to version control.
# Used log level (will be ignored if command line parameter is given).
# valid log levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
loglevel = logging.INFO
# Shared secret used to encrypt the message
# (will be ignored if command line parameter is given).
shared_secret = "MySuperSecretSharedSecret"
# Username used to send the message to the server.
# The username is the eMail address you used for your alertr.de account
# (will be ignored if command line parameter is given).
username = "MyEmailAccount@alertr.de"
# The password of your alertr.de account.
# (will be ignored if command line parameter is given).
password = "MyAlertrDePassword"
# Channel used for the message
# (will be ignored if command line parameter is given).
channel = "MyChannel"
# Number of connection retries until sending is given up
# (will be ignored if command line parameter is given).
# A value of -1 means it is retried indefinitely.
max_retries = 16
if __name__ == '__main__':
    # parsing command line options
    parser = optparse.OptionParser()
    parser.formatter = optparse.TitledHelpFormatter()
    # Give an example command to send a message
    # (the runs of "\t" presumably force spacing/line breaks in optparse's
    # reflowed epilog output — TODO confirm on the target terminal width).
    parser.epilog = "Example command to send a message: " \
        + "\t\t\t\t\t\t\t\t\t\t" \
        + "'python %s --sbj \"Test Subject\" --msg \"Long text.\"'" \
        % sys.argv[0] \
        + "\t\t\t\t\t\t\t\t\t\t" \
        + "Example command to send a message with stdin: " \
        + "\t\t\t\t\t\t\t\t\t\t" \
        + "'echo \"Long text.\" | python %s --sbj \"Test Subject\"'" \
        % sys.argv[0] \
        + "\t\t\t\t\t\t\t\t\t\t" \
        + "For more detailed examples please visit: " \
        + "\t\t\t\t\t\t\t\t\t\t" \
        + "https://github.com/sqall01/lightweight-push"
    # Options describing the message content itself.
    message_group = optparse.OptionGroup(parser,
                                         "Message arguments.")
    message_group.add_option("--sbj",
                             "--subject",
                             dest="subject",
                             action="store",
                             help="Subject of the message. " \
                                 "(Required)",
                             default=None)
    message_group.add_option("--msg",
                             "--message",
                             dest="message",
                             action="store",
                             help="Message to send. " \
                                 "(Required if message is not given via stdin)",
                             default=None)
    message_group.add_option("-s",
                             "--state",
                             dest="state",
                             action="store",
                             type="int",
                             help="State of the sensor alert message " \
                                 "(if not given, the message is not "\
                                 "considered to be a sensor alert). " \
                                 "Valid values: 0 or 1 " \
                                 "(Optional)",
                             default=None)
    message_group.add_option("-t",
                             "--time-triggered",
                             dest="tt",
                             action="store",
                             type="int",
                             help="UTC timestamp the alarm was triggered " \
                                 "(if not given, the current UTC time is used). " \
                                 "(Optional)",
                             default=None)
    # Options overriding the global configuration defaults above.
    config_group = optparse.OptionGroup(parser,
                                        "Configuration arguments.")
    config_group.add_option("-u",
                            "--username",
                            dest="username",
                            action="store",
                            help="Username used to send the message to the server. " \
                                "The username is the eMail address you used for your " \
                                "alertr.de account " \
                                "(if not given, the one configured in the script is used). " \
                                "(Optional)",
                            default=None)
    config_group.add_option("-p",
                            "--password",
                            dest="password",
                            action="store",
                            help="The password of your alertr.de account " \
                                "(if not given, the one configured in the script is used). " \
                                "(Optional)",
                            default=None)
    config_group.add_option("-c",
                            "--channel",
                            dest="channel",
                            action="store",
                            help="Channel used for the message " \
                                "(if not given, the one configured in the script is used). " \
                                "(Optional)",
                            default=None)
    config_group.add_option("--ss",
                            "--shared-secret",
                            dest="shared_secret",
                            action="store",
                            help="Shared secret used to encrypt the message " \
                                "(if not given, the one configured in the script is used). " \
                                "(Optional)",
                            default=None)
    config_group.add_option("-m",
                            "--max-retries",
                            dest="max_retries",
                            action="store",
                            type="int",
                            help="Number of connection retries until sending is given up " \
                                "(if not given, the one configured in the script is used). " \
                                "A value of -1 means it is retried indefinitely " \
                                "(Optional)",
                            default=None)
    config_group.add_option("-l",
                            "--loglevel",
                            dest="loglevel",
                            action="store",
                            help="Used log level " \
                                "(if not given, the one configured in the script is used). " \
                                "Valid log levels: DEBUG, INFO, WARNING, ERROR, CRITICAL " \
                                "(Optional)",
                            default=None)
    # The empty first option string is filtered out by optparse, leaving
    # only the long option for this flag.
    config_group.add_option("",
                            "--no-check-ssl-certificate",
                            dest="no_check_SSL_certificate",
                            action="store_true",
                            help="Do not verify the SSL certificate of the server. " \
                                "Only use it if you know what you are doing. This option " \
                                "will allow Man-In-The-Middle attacks during the sending " \
                                "process. " \
                                "(Optional)",
                            default=False)
    parser.add_option_group(message_group)
    parser.add_option_group(config_group)
    (options, args) = parser.parse_args()
# Remove CA file for checking SSL connection.
if options.no_check_SSL_certificate:
removeCaFile(ca_file)
ca_file = None
# Overwrite settings if given as an command line argument.
if options.username:
username = options.username
# Parse username option.
if options.password:
password = options.password
# Parse channel option.
if options.channel:
channel = options.channel
# Parse shared secret option.
if options.shared_secret:
shared_secret = options.shared_secret
# Parse max retries option.
if options.max_retries:
max_retries = options.max_retries
# Parse loglevel option.
if options.loglevel:
temp_loglevel = options.loglevel.upper()
if temp_loglevel == "DEBUG":
loglevel = logging.DEBUG
elif temp_loglevel == "INFO":
loglevel = logging.INFO
elif temp_loglevel == "WARNING":
loglevel = logging.WARNING
elif temp_loglevel == "ERROR":
loglevel = logging.ERROR
elif temp_loglevel == "CRITICAL":
loglevel = logging.CRITICAL
else:
print("Given loglevel illegal.")
sys.exit(1)
# Initialize logging
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', level=loglevel)
# Parse state option.
if options.state is not None:
is_sa = True
if options.state == 1 or options.state == 0:
state = options.state
else:
logging.critical("State can either be 0 or 1.")
sys.exit(1)
else:
is_sa = False
state = None
# Parse time triggered option.
if options.tt:
tt = options.tt
else:
tt = int(time.time())
# Parse subject option
if options.subject is None:
logging.critical("Subject of message is required.")
sys.exit(1)
else:
subject = options.subject
    # Create the push service client with the account credentials and the
    # shared secret used for end-to-end message encryption.
    push_service = LightweightPush(username,
                                   password,
                                   shared_secret)
    # Check if channel is valid.
    # NOTE(review): _check_channel is a private helper of LightweightPush;
    # relying on it may break with a library update — TODO confirm a public
    # alternative exists.
    if not push_service._check_channel(channel):
        logging.critical("Channel contains illegal characters.")
        sys.exit(1)
    # Check if script is used directly.
    if options.message:
        message = options.message
    elif not sys.stdin.isatty():
        # Parse message from stdin as script is used in a pipe.
        message = ""
        for line in sys.stdin:
            message += line
    else:
        logging.critical("Message is required.")
        sys.exit(1)
times_sleep = 5
while True:
error_code = push_service.send_msg(subject,
message,
channel,
state=state,
time_triggered=tt,
max_retries=1)
# Processing error code
if error_code is None:
pass
elif error_code == ErrorCodes.NO_ERROR:
logging.info("Sending message successful.")
break
elif error_code == ErrorCodes.DATABASE_ERROR:
logging.error("Database error on server side. Trying again.")
elif error_code == ErrorCodes.AUTH_ERROR:
logging.error("Authentication failed. "
+ "Check your credentials.")
sys.exit(1)
elif error_code == ErrorCodes.ILLEGAL_MSG_ERROR:
logging.error("Illegal message was sent. "
+ "Please make sure to use the newest version. "
+ "If you do, please open an issue on "
+ "https://github.com/sqall01/lightweight-push")
sys.exit(1)
elif error_code == ErrorCodes.GOOGLE_MSG_TOO_LARGE:
logging.error("Transmitted message too large. "
+ "Please shorten it.")
sys.exit(1)
elif error_code == ErrorCodes.GOOGLE_CONNECTION:
logging.error("Connection error on server side. "
+ "Trying again.")
elif error_code == ErrorCodes.GOOGLE_AUTH:
logging.error("Authentication error on server side. "
+ "Trying again.")
elif error_code == ErrorCodes.VERSION_MISSMATCH:
logging.error("Version mismatch. "
+ "Please update your client.")
sys.exit(1)
elif error_code == ErrorCodes.NO_NOTIFICATION_PERMISSION:
logging.error("No permission to use notification channel. "
+ "Please update channel configuration.")
sys.exit(1)
elif error_code == ErrorCodes.CLIENT_CONNECTION_ERROR:
logging.error("Client could not create a connection to the "
+ "server. Please check your Internet connection.")
elif error_code == ErrorCodes.WEB_BRIDGE_ERROR:
logging.error("Web bridge error on server side. "
+ "Trying again.")
else:
logging.error("The following error code occurred: %d."
% error_code
+ "Please make sure to use the newest version. "
+ "If you do, please open an issue on "
+ "https://github.com/sqall01/lightweight-push")
sys.exit(1)
# Process retries.
if max_retries_ctr == 0:
logging.error("Tried the maximum of times for sending. Giving up.")
sys.exit(1)
elif max_retries_ctr < 0:
pass
else:
max_retries_ctr -= 1
logging.info("Waiting %d seconds before trying again." % times_sleep)
time.sleep(times_sleep)
times_sleep *= 2
if times_sleep > 86400:
times_sleep = 86400
    sys.exit(0)
thjashin/tensorflow | tensorflow/python/client/client_lib.py | 111 | 1698 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for launching graphs and executing operations.
See the @{$python/client} guide.
@@Session
@@InteractiveSession
@@get_default_session
@@OpError
@@CancelledError
@@UnknownError
@@InvalidArgumentError
@@DeadlineExceededError
@@NotFoundError
@@AlreadyExistsError
@@PermissionDeniedError
@@UnauthenticatedError
@@ResourceExhaustedError
@@FailedPreconditionError
@@AbortedError
@@OutOfRangeError
@@UnimplementedError
@@InternalError
@@UnavailableError
@@DataLossError
@@exception_type_from_error_code
@@error_code_from_exception_type
@@raise_exception_on_not_ok_status
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.client.session import InteractiveSession
from tensorflow.python.client.session import Session
from tensorflow.python.framework import errors
from tensorflow.python.framework.errors import OpError
from tensorflow.python.framework.ops import get_default_session
| apache-2.0 |
krahman/BuildingMachineLearningSystemsWithPython | ch02/seeds_knn.py | 1 | 1114 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from load import load_dataset
import numpy as np
from knn import learn_model, apply_model, accuracy
# Load the seeds dataset as (features, labels) numpy arrays.
features, labels = load_dataset('seeds')
def cross_validate(features, labels):
    '''Average the 1-NN error over ten interleaved cross-validation folds.'''
    fold_results = []
    for fold in range(10):
        # Every 10th sample (offset by `fold`) is held out for testing.
        is_training = np.ones(len(features), bool)
        is_training[fold::10] = 0
        is_testing = ~is_training
        model = learn_model(1, features[is_training], labels[is_training])
        fold_results.append(
            accuracy(features[is_testing], labels[is_testing], model))
    return sum(fold_results) / 10.0
# Baseline: cross-validate on the raw feature values.
error = cross_validate(features, labels)
print('Ten fold cross-validated error was {0:.1%}.'.format(error))
# Z-score (whiten) the features: zero mean and unit variance per column so
# all features contribute comparably to the nearest-neighbor distance.
features -= features.mean(0)
features /= features.std(0)
error = cross_validate(features, labels)
print(
    'Ten fold cross-validated error after z-scoring was {0:.1%}.'.format(error))
| mit |
egoid/baytree | lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/operations.py | 33 | 18888 | import re
from django.conf import settings
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.gdal import GDALRaster
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.utils import ProgrammingError
from django.utils import six
from django.utils.functional import cached_property
from .adapter import PostGISAdapter
from .models import PostGISGeometryColumns, PostGISSpatialRefSys
from .pgraster import from_pgraster, get_pgraster_srid, to_pgraster
# Identifier to mark raster lookups as bilateral.
BILATERAL = 'bilateral'
class PostGISOperator(SpatialOperator):
    """Spatial lookup operator aware of PostGIS geography and raster rules."""
    def __init__(self, geography=False, raster=False, **kwargs):
        # Only a subset of the operators and functions are available for the
        # geography type.
        self.geography = geography
        # Only a subset of the operators and functions are available for the
        # raster type. Lookups that don't support raster will be converted to
        # polygons. If the raster argument is set to BILATERAL, then the
        # operator cannot handle mixed geom-raster lookups.
        self.raster = raster
        super(PostGISOperator, self).__init__(**kwargs)
    def as_sql(self, connection, lookup, template_params, *args):
        # Reject operators unsupported on geography columns before SQL is
        # built, then apply the raster conversions and delegate to the base.
        if lookup.lhs.output_field.geography and not self.geography:
            raise ValueError('PostGIS geography does not support the "%s" '
                             'function/operator.' % (self.func or self.op,))
        template_params = self.check_raster(lookup, template_params)
        return super(PostGISOperator, self).as_sql(connection, lookup, template_params, *args)
    def check_raster(self, lookup, template_params):
        """Inject band indices and wrap raster operands in ST_Polygon()
        where the operator does not (fully) support rasters.
        """
        # Get rhs value.
        if isinstance(lookup.rhs, (tuple, list)):
            rhs_val = lookup.rhs[0]
            spheroid = lookup.rhs[-1] == 'spheroid'
        else:
            rhs_val = lookup.rhs
            spheroid = False
        # Check which input is a raster.
        lhs_is_raster = lookup.lhs.field.geom_type == 'RASTER'
        rhs_is_raster = isinstance(rhs_val, GDALRaster)
        # Look for band indices and inject them if provided.
        if lookup.band_lhs is not None and lhs_is_raster:
            if not self.func:
                raise ValueError('Band indices are not allowed for this operator, it works on bbox only.')
            template_params['lhs'] = '%s, %s' % (template_params['lhs'], lookup.band_lhs)
        if lookup.band_rhs is not None and rhs_is_raster:
            if not self.func:
                raise ValueError('Band indices are not allowed for this operator, it works on bbox only.')
            template_params['rhs'] = '%s, %s' % (template_params['rhs'], lookup.band_rhs)
        # Convert rasters to polygons if necessary.
        if not self.raster or spheroid:
            # Operators without raster support.
            if lhs_is_raster:
                template_params['lhs'] = 'ST_Polygon(%s)' % template_params['lhs']
            if rhs_is_raster:
                template_params['rhs'] = 'ST_Polygon(%s)' % template_params['rhs']
        elif self.raster == BILATERAL:
            # Operators with raster support but don't support mixed (rast-geom)
            # lookups.
            if lhs_is_raster and not rhs_is_raster:
                template_params['lhs'] = 'ST_Polygon(%s)' % template_params['lhs']
            elif rhs_is_raster and not lhs_is_raster:
                template_params['rhs'] = 'ST_Polygon(%s)' % template_params['rhs']
        return template_params
class PostGISDistanceOperator(PostGISOperator):
    """Distance operator that picks the spherical or spheroidal PostGIS
    distance function for geodetic (non-geography) geometry columns.
    """
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %(value)s'
    def as_sql(self, connection, lookup, template_params, sql_params):
        # Geodetic geometry columns need the sphere/spheroid functions;
        # geography columns and projected systems use the default path.
        if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection):
            template_params = self.check_raster(lookup, template_params)
            sql_template = self.sql_template
            if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid':
                template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'})
                sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %(value)s'
                # Using distance_spheroid requires the spheroid of the field as
                # a parameter.
                sql_params.insert(1, lookup.lhs.output_field._spheroid)
            else:
                template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'})
            return sql_template % template_params, sql_params
        return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params)
class PostGISOperations(BaseSpatialOperations, DatabaseOperations):
    """Database operations backend for PostGIS."""
    name = 'postgis'
    postgis = True
    geography = True
    geom_func_prefix = 'ST_'
    # Parses "major.minor1.minor2" version strings from the PostGIS library.
    version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
    Adapter = PostGISAdapter
    # Maps Django lookup names to the PostGIS operator/function that
    # implements them, with per-operator geography and raster capabilities.
    gis_operators = {
        'bbcontains': PostGISOperator(op='~', raster=True),
        'bboverlaps': PostGISOperator(op='&&', geography=True, raster=True),
        'contained': PostGISOperator(op='@', raster=True),
        'overlaps_left': PostGISOperator(op='&<', raster=BILATERAL),
        'overlaps_right': PostGISOperator(op='&>', raster=BILATERAL),
        'overlaps_below': PostGISOperator(op='&<|'),
        'overlaps_above': PostGISOperator(op='|&>'),
        'left': PostGISOperator(op='<<'),
        'right': PostGISOperator(op='>>'),
        'strictly_below': PostGISOperator(op='<<|'),
        'strictly_above': PostGISOperator(op='|>>'),
        'same_as': PostGISOperator(op='~=', raster=BILATERAL),
        'exact': PostGISOperator(op='~=', raster=BILATERAL),  # alias of same_as
        'contains': PostGISOperator(func='ST_Contains', raster=BILATERAL),
        'contains_properly': PostGISOperator(func='ST_ContainsProperly', raster=BILATERAL),
        'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True, raster=BILATERAL),
        'covers': PostGISOperator(func='ST_Covers', geography=True, raster=BILATERAL),
        'crosses': PostGISOperator(func='ST_Crosses'),
        'disjoint': PostGISOperator(func='ST_Disjoint', raster=BILATERAL),
        'equals': PostGISOperator(func='ST_Equals'),
        'intersects': PostGISOperator(func='ST_Intersects', geography=True, raster=BILATERAL),
        'isvalid': PostGISOperator(func='ST_IsValid'),
        'overlaps': PostGISOperator(func='ST_Overlaps', raster=BILATERAL),
        'relate': PostGISOperator(func='ST_Relate'),
        'touches': PostGISOperator(func='ST_Touches', raster=BILATERAL),
        'within': PostGISOperator(func='ST_Within', raster=BILATERAL),
        'dwithin': PostGISOperator(func='ST_DWithin', geography=True, raster=BILATERAL),
        'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', geography=True),
        'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True),
        'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True),
        'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True),
    }
    unsupported_functions = set()
    # Django GIS function names whose PostGIS counterparts differ.
    function_names = {
        'BoundingCircle': 'ST_MinimumBoundingCircle',
        'MemSize': 'ST_Mem_Size',
        'NumPoints': 'ST_NPoints',
    }
def __init__(self, connection):
    """
    Initialize the PostGIS operations backend and bind the names of the
    PostGIS SQL functions used elsewhere in this class.
    """
    super(PostGISOperations, self).__init__(connection)
    prefix = self.geom_func_prefix
    # Attribute name -> function-name suffix.  Every attribute below is
    # assigned prefix + suffix (note that a few PostGIS function names
    # are intentionally lower-case, e.g. 'distance_sphere').
    function_suffixes = {
        'area': 'Area',
        'bounding_circle': 'MinimumBoundingCircle',
        'centroid': 'Centroid',
        'collect': 'Collect',
        'difference': 'Difference',
        'distance': 'Distance',
        'distance_sphere': 'distance_sphere',
        'distance_spheroid': 'distance_spheroid',
        'envelope': 'Envelope',
        'extent': 'Extent',
        'extent3d': '3DExtent',
        'force_rhr': 'ForceRHR',
        'geohash': 'GeoHash',
        'geojson': 'AsGeoJson',
        'gml': 'AsGML',
        'intersection': 'Intersection',
        'isvalid': 'IsValid',
        'kml': 'AsKML',
        'length': 'Length',
        'length3d': '3DLength',
        'length_spheroid': 'length_spheroid',
        'makeline': 'MakeLine',
        'makevalid': 'MakeValid',
        'mem_size': 'mem_size',
        'num_geom': 'NumGeometries',
        'num_points': 'npoints',
        'perimeter': 'Perimeter',
        'perimeter3d': '3DPerimeter',
        'point_on_surface': 'PointOnSurface',
        'polygonize': 'Polygonize',
        'reverse': 'Reverse',
        'scale': 'Scale',
        'snap_to_grid': 'SnapToGrid',
        'svg': 'AsSVG',
        'sym_difference': 'SymDifference',
        'transform': 'Transform',
        'translate': 'Translate',
        'union': 'Union',
        'unionagg': 'Union',
    }
    for attr, suffix in function_suffixes.items():
        setattr(self, attr, prefix + suffix)
@cached_property
def spatial_version(self):
    """Determine the version of the PostGIS library."""
    # The function signatures used by this backend depend on the PostGIS
    # version.  Autodetecting it costs a database query, which users can
    # avoid by supplying settings.POSTGIS_VERSION as a 3-tuple of the
    # major, minor, and subminor revision numbers.
    if hasattr(settings, 'POSTGIS_VERSION'):
        return settings.POSTGIS_VERSION
    # Run a basic query to check the status of the connection so we're
    # sure we only raise the error below if the problem comes from
    # PostGIS and not from PostgreSQL itself (see #24862).
    self._get_postgis_func('version')
    try:
        vtup = self.postgis_version_tuple()
    except ProgrammingError:
        raise ImproperlyConfigured(
            'Cannot determine PostGIS version for database "%s" '
            'using command "SELECT postgis_lib_version()". '
            'GeoDjango requires at least PostGIS version 2.0. '
            'Was the database created from a spatial database '
            'template?' % self.connection.settings_dict['NAME']
        )
    # Drop the raw version string, keeping (major, minor, subminor).
    return vtup[1:]
def convert_extent(self, box, srid):
    """
    Convert the bounding-box text returned by PostGIS for the `Extent`
    aggregate (`box` argument, e.g. "BOX(-90.0 30.0, -85.0 40.0)") into a
    4-tuple (xmin, ymin, xmax, ymax).  Returns None when `box` is None.
    """
    if box is None:
        return None
    # Strip the leading "BOX(" and trailing ")", then split into the
    # lower-left and upper-right corner coordinate pairs.
    lower_left, upper_right = box[4:-1].split(',')
    x0, y0 = (float(coord) for coord in lower_left.split())
    x1, y1 = (float(coord) for coord in upper_right.split())
    return (x0, y0, x1, y1)
def convert_extent3d(self, box3d, srid):
    """
    Convert the 3D bounding-box text returned by PostGIS for the
    `Extent3D` aggregate (`box3d` argument, e.g.
    "BOX3D(-90.0 30.0 1, -85.0 40.0 2)") into a 6-tuple
    (xmin, ymin, zmin, xmax, ymax, zmax).  Returns None when `box3d` is None.
    """
    if box3d is None:
        return None
    # Strip the leading "BOX3D(" and trailing ")", then split corners.
    lower, upper = box3d[6:-1].split(',')
    x0, y0, z0 = (float(coord) for coord in lower.split())
    x1, y1, z1 = (float(coord) for coord in upper.split())
    return (x0, y0, z0, x1, y1, z1)
def convert_geom(self, hex, geo_field):
    """
    Convert the hex-encoded geometry returned from PostGIS aggregates
    into a Geometry object with the field's SRID; None stays None.
    """
    if not hex:
        return None
    return Geometry(hex, srid=geo_field.srid)
def geo_db_type(self, f):
    """
    Return the database column type for the given spatial field.

    Rasters map to the 'raster' type; geometry/geography fields map to a
    typed column such as 'geometry(POINT,4326)'.
    """
    if f.geom_type == 'RASTER':
        return 'raster'
    # Type-based geometries: 3D fields get a 'Z' suffix.
    # TODO: Support 'M' extension.
    geom_type = f.geom_type + 'Z' if f.dim == 3 else f.geom_type
    if not f.geography:
        return 'geometry(%s,%d)' % (geom_type, f.srid)
    if f.srid != 4326:
        raise NotImplementedError('PostGIS only supports geography columns with an SRID of 4326.')
    return 'geography(%s,%d)' % (geom_type, f.srid)
def get_distance(self, f, dist_val, lookup_type, handle_spheroid=True):
    """
    Retrieve the distance parameters for the given geometry field,
    distance lookup value, and the distance lookup type.

    This is the most complex implementation of the spatial backends due to
    what is supported on geodetic geometry columns vs. what's available on
    projected geometry columns.  In addition, it has to take into account
    the geography column type.
    """
    # Getting the distance parameter; dist_val is a sequence whose first
    # element is the distance value and whose optional second element is
    # a lookup option (e.g. 'spheroid').
    value = dist_val[0]
    # Shorthand boolean flags.
    geodetic = f.geodetic(self.connection)
    geography = f.geography
    if isinstance(value, Distance):
        if geography:
            # Geography columns take the distance in meters
            # (Distance.m -- presumably the meters attribute; confirm
            # against django.contrib.gis.measure).
            dist_param = value.m
        elif geodetic:
            if lookup_type == 'dwithin':
                # DWithin on geodetic geometry columns only accepts
                # plain numeric degree values, not Distance objects.
                raise ValueError('Only numeric values of degree units are '
                                 'allowed on geographic DWithin queries.')
            dist_param = value.m
        else:
            # Projected columns: convert into the units of the field.
            dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
    else:
        # Assuming the distance is in the units of the field.
        dist_param = value
    params = [dist_param]
    # handle_spheroid *might* be dropped in Django 2.0 as PostGISDistanceOperator
    # also handles it (#25524).
    if handle_spheroid and len(dist_val) > 1:
        option = dist_val[1]
        if not geography and geodetic and lookup_type != 'dwithin' and option == 'spheroid':
            # using distance_spheroid requires the spheroid of the field as
            # a parameter.
            params.insert(0, f._spheroid)
    return params
def get_geom_placeholder(self, f, value, compiler):
    """
    Provide a proper substitution value for geometries or rasters that
    are not in the SRID of the field, by wrapping the placeholder in an
    ST_Transform() call when needed.
    """
    # Determine the SRID of the supplied value, if any.
    if value is None:
        value_srid = None
    elif f.geom_type == 'RASTER' and isinstance(value, six.string_types):
        # Rasters arrive as hex strings; extract the SRID from the blob.
        value_srid = get_pgraster_srid(value)
    else:
        value_srid = value.srid

    if value_srid is None or value_srid == f.srid:
        # No value, or it already matches the field SRID: no transform.
        tmpl = '%s'
    elif f.geom_type == 'RASTER' and isinstance(value, six.string_types):
        # Hex raster strings must be cast to ::raster before transforming.
        tmpl = '%s((%%s)::raster, %s)' % (self.transform, f.srid)
    else:
        tmpl = '%s(%%s, %s)' % (self.transform, f.srid)

    if hasattr(value, 'as_sql'):
        # F expressions supply their own column SQL rather than a bind
        # parameter, so splice the compiled SQL into the template.
        sql, _ = compiler.compile(value)
        tmpl = tmpl % sql
    return tmpl
def _get_postgis_func(self, func):
    """
    Helper routine for calling PostGIS functions and returning their result.
    """
    # Use a temporary connection so the main one is closed out afterwards
    # (see #9437).
    with self.connection.temporary_connection() as cursor:
        cursor.execute('SELECT %s()' % func)
        row = cursor.fetchone()
    return row[0]
# Thin wrappers around the corresponding PostGIS SQL reporting functions.
def postgis_geos_version(self):
    "Returns the version of the GEOS library used with PostGIS."
    return self._get_postgis_func('postgis_geos_version')

def postgis_lib_version(self):
    "Returns the version number of the PostGIS library used with PostgreSQL."
    return self._get_postgis_func('postgis_lib_version')

def postgis_proj_version(self):
    "Returns the version of the PROJ.4 library used with PostGIS."
    return self._get_postgis_func('postgis_proj_version')

def postgis_version(self):
    "Returns PostGIS version number and compile-time options."
    return self._get_postgis_func('postgis_version')

def postgis_full_version(self):
    # NOTE: docstring previously duplicated postgis_version()'s; this
    # variant returns the complete version report.
    "Returns the full PostGIS version report, including library versions."
    return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
    """
    Return the PostGIS version as a tuple:
    (version string, major, minor, subminor).
    """
    version = self.postgis_lib_version()
    match = self.version_regex.match(version)
    if not match:
        raise Exception('Could not parse PostGIS version string: %s' % version)
    return (version,
            int(match.group('major')),
            int(match.group('minor1')),
            int(match.group('minor2')))
def proj_version_tuple(self):
    """
    Return the version of PROJ.4 used by PostGIS as a tuple of the
    major, minor, and subminor release numbers.
    """
    version_string = self.postgis_proj_version()
    # The PROJ.4 version is embedded in a longer report string; pull out
    # the first dotted version number.
    match = re.search(r'(\d+)\.(\d+)\.(\d+)', version_string)
    if not match:
        raise Exception('Could not determine PROJ.4 version from PostGIS.')
    return tuple(int(part) for part in match.groups())
def spatial_aggregate_name(self, agg_name):
    """Return the PostGIS function name for the given spatial aggregate."""
    # Extent3D is the lone aggregate whose function name is not simply
    # the geometry function prefix plus the aggregate name.
    if agg_name == 'Extent3D':
        return self.extent3d
    return self.geom_func_prefix + agg_name
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
    """Return the model representing the PostGIS geometry_columns view."""
    return PostGISGeometryColumns

def spatial_ref_sys(self):
    """Return the model representing the PostGIS spatial_ref_sys table."""
    return PostGISSpatialRefSys

# Methods to convert between PostGIS rasters and dicts that are
# readable by GDALRaster.
def parse_raster(self, value):
    """Convert a PostGIS raster value into a GDALRaster-readable dict."""
    return from_pgraster(value)

def deconstruct_raster(self, value):
    """Convert a GDALRaster-readable dict into a PostGIS raster value."""
    return to_pgraster(value)
| mit |
mrquim/repository.mrquim | repo/script.module.pycryptodome/lib/Crypto/Signature/__init__.py | 19 | 1686 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""Digital signature protocols
A collection of standardized protocols to carry out digital signatures.
"""
__all__ = ['PKCS1_v1_5', 'PKCS1_PSS', 'DSS', 'pkcs1_15', 'pss']
| gpl-2.0 |
jeenalee/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_handshake_hybi.py | 413 | 22552 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.handshake.hybi import Handshaker
import mock
class RequestDefinition(object):
    """A class for holding data for constructing opening handshake strings for
    testing the opening handshake processor.
    """

    def __init__(self, method, uri, headers):
        # HTTP method (e.g. 'GET'), request URI, and a dict of header
        # name -> value pairs forming the opening handshake request.
        self.method = method
        self.uri = uri
        self.headers = headers
def _create_good_request_def():
    # A minimal, well-formed WebSocket (version 13) opening handshake
    # request, used as the baseline that individual tests then mutate.
    return RequestDefinition(
        'GET', '/demo',
        {'Host': 'server.example.com',
         'Upgrade': 'websocket',
         'Connection': 'Upgrade',
         'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
         'Sec-WebSocket-Version': '13',
         'Origin': 'http://example.com'})
def _create_request(request_def):
    # Build a mock request (with an empty mock connection) from the given
    # RequestDefinition.
    conn = mock.MockConn('')
    return mock.MockRequest(
        method=request_def.method,
        uri=request_def.uri,
        headers_in=request_def.headers,
        connection=conn)
def _create_handshaker(request):
    # Convenience wrapper: a Handshaker using the default mock dispatcher.
    handshaker = Handshaker(request, mock.MockDispatcher())
    return handshaker
class SubprotocolChoosingDispatcher(object):
    """A dispatcher for testing. This dispatcher sets the i-th subprotocol
    of requested ones to ws_protocol where i is given on construction as index
    argument. If index is negative, default_value will be set to ws_protocol.
    """

    def __init__(self, index, default_value=None):
        self.index = index
        self.default_value = default_value

    def do_extra_handshake(self, conn_context):
        # A negative index selects the fallback value instead of one of
        # the client-requested subprotocols.
        if self.index < 0:
            conn_context.ws_protocol = self.default_value
        else:
            requested = conn_context.ws_requested_protocols
            conn_context.ws_protocol = requested[self.index]

    def transfer_data(self, conn_context):
        # No-op: these tests exercise only the opening handshake.
        pass
class HandshakeAbortedException(Exception):
    """Raised by AbortingDispatcher to reject a handshake under test."""
    pass
class AbortingDispatcher(object):
    """A dispatcher for testing. This dispatcher raises an exception in
    do_extra_handshake to reject the request.
    """

    def do_extra_handshake(self, conn_context):
        # Always rejects; tests assert this exception propagates out of
        # Handshaker.do_handshake() uncaught.
        raise HandshakeAbortedException('An exception to reject the request')

    def transfer_data(self, conn_context):
        # No-op stub; never reached because the handshake is aborted.
        pass
class AbortedByUserDispatcher(object):
    """A dispatcher for testing. This dispatcher raises an
    AbortedByUserException in do_extra_handshake to reject the request.
    """

    def do_extra_handshake(self, conn_context):
        # Tests assert this exception propagates out of do_handshake().
        raise AbortedByUserException('An AbortedByUserException to reject the '
                                     'request')

    def transfer_data(self, conn_context):
        # No-op stub; never reached because the handshake is aborted.
        pass
# The handshake response expected for the baseline request built by
# _create_good_request_def() (the accept value is the standard one for
# the sample nonce 'dGhlIHNhbXBsZSBub25jZQ==').
_EXPECTED_RESPONSE = (
    'HTTP/1.1 101 Switching Protocols\r\n'
    'Upgrade: websocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n')
class HandshakerTest(unittest.TestCase):
    """A unittest for draft-ietf-hybi-thewebsocketprotocol-06 and later
    handshake processor.
    """
def test_do_handshake(self):
    # A well-formed request triggers the extra-handshake hook, writes the
    # standard response, and populates the ws_* request attributes.
    request = _create_request(_create_good_request_def())
    dispatcher = mock.MockDispatcher()
    handshaker = Handshaker(request, dispatcher)
    handshaker.do_handshake()
    self.assertTrue(dispatcher.do_extra_handshake_called)
    self.assertEqual(
        _EXPECTED_RESPONSE, request.connection.written_data())
    self.assertEqual('/demo', request.ws_resource)
    self.assertEqual('http://example.com', request.ws_origin)
    self.assertEqual(None, request.ws_protocol)
    self.assertEqual(None, request.ws_extensions)
    self.assertEqual(common.VERSION_HYBI_LATEST, request.ws_version)
def test_do_handshake_with_extra_headers(self):
    # Headers unrelated to the WebSocket handshake must be ignored.
    request_def = _create_good_request_def()
    # Add headers not related to WebSocket opening handshake.
    request_def.headers['FooKey'] = 'BarValue'
    request_def.headers['EmptyKey'] = ''
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(
        _EXPECTED_RESPONSE, request.connection.written_data())
def test_do_handshake_with_capitalized_value(self):
    # Upgrade/Connection header values are matched case-insensitively.
    request_def = _create_good_request_def()
    request_def.headers['upgrade'] = 'WEBSOCKET'
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(
        _EXPECTED_RESPONSE, request.connection.written_data())

    request_def = _create_good_request_def()
    request_def.headers['Connection'] = 'UPGRADE'
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(
        _EXPECTED_RESPONSE, request.connection.written_data())
def test_do_handshake_with_multiple_connection_values(self):
    # Connection may carry a comma-separated token list (with empty
    # elements) as long as 'Upgrade' is among the tokens.
    request_def = _create_good_request_def()
    request_def.headers['Connection'] = 'Upgrade, keep-alive, , '
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(
        _EXPECTED_RESPONSE, request.connection.written_data())
def test_aborting_handshake(self):
    handshaker = Handshaker(
        _create_request(_create_good_request_def()),
        AbortingDispatcher())
    # do_extra_handshake raises an exception. Check that it's not caught by
    # do_handshake.
    self.assertRaises(HandshakeAbortedException, handshaker.do_handshake)
def test_do_handshake_with_protocol(self):
    # When the dispatcher selects a subprotocol, the response must echo
    # it in Sec-WebSocket-Protocol and it must be stored on the request.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat'
    request = _create_request(request_def)
    handshaker = Handshaker(request, SubprotocolChoosingDispatcher(0))
    handshaker.do_handshake()
    EXPECTED_RESPONSE = (
        'HTTP/1.1 101 Switching Protocols\r\n'
        'Upgrade: websocket\r\n'
        'Connection: Upgrade\r\n'
        'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
        'Sec-WebSocket-Protocol: chat\r\n\r\n')
    self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data())
    self.assertEqual('chat', request.ws_protocol)
def test_do_handshake_protocol_not_in_request_but_in_response(self):
    request_def = _create_good_request_def()
    request = _create_request(request_def)
    handshaker = Handshaker(
        request, SubprotocolChoosingDispatcher(-1, 'foobar'))
    # No request has been made but ws_protocol is set. HandshakeException
    # must be raised.
    self.assertRaises(HandshakeException, handshaker.do_handshake)
def test_do_handshake_with_protocol_no_protocol_selection(self):
    # The client requested subprotocols but the dispatcher chose none.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat'
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    # ws_protocol is not set. HandshakeException must be raised.
    self.assertRaises(HandshakeException, handshaker.do_handshake)
def test_do_handshake_with_extensions(self):
    # Unknown extensions are dropped; the supported one is accepted and
    # echoed back in Sec-WebSocket-Extensions.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = (
        'permessage-compress; method=deflate, unknown')
    EXPECTED_RESPONSE = (
        'HTTP/1.1 101 Switching Protocols\r\n'
        'Upgrade: websocket\r\n'
        'Connection: Upgrade\r\n'
        'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
        'Sec-WebSocket-Extensions: permessage-compress; method=deflate\r\n'
        '\r\n')
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data())
    self.assertEqual(1, len(request.ws_extensions))
    extension = request.ws_extensions[0]
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     extension.name())
    self.assertEqual(['method'], extension.get_parameter_names())
    self.assertEqual('deflate', extension.get_parameter_value('method'))
    self.assertEqual(1, len(request.ws_extension_processors))
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     request.ws_extension_processors[0].name())
def test_do_handshake_with_permessage_compress(self):
    # The permessage-compress extension alone is accepted and processed.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = (
        'permessage-compress; method=deflate')
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(1, len(request.ws_extensions))
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     request.ws_extensions[0].name())
    self.assertEqual(1, len(request.ws_extension_processors))
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     request.ws_extension_processors[0].name())
def test_do_handshake_with_quoted_extensions(self):
    # Quoted-string extension parameter values must be parsed with their
    # backslash escapes resolved.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = (
        'permessage-compress; method=deflate, , '
        'unknown; e = "mc^2"; ma="\r\n \\\rf "; pv=nrt')
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(2, len(request.ws_requested_extensions))
    first_extension = request.ws_requested_extensions[0]
    self.assertEqual('permessage-compress', first_extension.name())
    self.assertEqual(['method'], first_extension.get_parameter_names())
    self.assertEqual('deflate',
                     first_extension.get_parameter_value('method'))
    second_extension = request.ws_requested_extensions[1]
    self.assertEqual('unknown', second_extension.name())
    self.assertEqual(
        ['e', 'ma', 'pv'], second_extension.get_parameter_names())
    self.assertEqual('mc^2', second_extension.get_parameter_value('e'))
    self.assertEqual(' \rf ', second_extension.get_parameter_value('ma'))
    self.assertEqual('nrt', second_extension.get_parameter_value('pv'))
def test_do_handshake_with_optional_headers(self):
    # Optional headers (including empty-valued ones) must survive into
    # the request's headers_in unchanged.
    request_def = _create_good_request_def()
    request_def.headers['EmptyValue'] = ''
    request_def.headers['AKey'] = 'AValue'
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    self.assertEqual(
        'AValue', request.headers_in['AKey'])
    self.assertEqual(
        '', request.headers_in['EmptyValue'])
def test_abort_extra_handshake(self):
    handshaker = Handshaker(
        _create_request(_create_good_request_def()),
        AbortedByUserDispatcher())
    # do_extra_handshake raises an AbortedByUserException. Check that it's
    # not caught by do_handshake.
    self.assertRaises(AbortedByUserException, handshaker.do_handshake)
def test_do_handshake_with_mux_and_deflate_frame(self):
    # Request mux followed by deflate-frame; only deflate-frame should be
    # accepted, though processors are created for both.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % (
        common.MUX_EXTENSION,
        common.DEFLATE_FRAME_EXTENSION))
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    # mux should be rejected.
    self.assertEqual(1, len(request.ws_extensions))
    self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
                     request.ws_extensions[0].name())
    self.assertEqual(2, len(request.ws_extension_processors))
    self.assertEqual(common.MUX_EXTENSION,
                     request.ws_extension_processors[0].name())
    self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
                     request.ws_extension_processors[1].name())
    self.assertFalse(hasattr(request, 'mux_processor'))
def test_do_handshake_with_deflate_frame_and_mux(self):
    # Same pair in the opposite order; mux is still rejected.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % (
        common.DEFLATE_FRAME_EXTENSION,
        common.MUX_EXTENSION))
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    # mux should be rejected.
    self.assertEqual(1, len(request.ws_extensions))
    first_extension = request.ws_extensions[0]
    self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
                     first_extension.name())
    self.assertEqual(2, len(request.ws_extension_processors))
    self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
                     request.ws_extension_processors[0].name())
    self.assertEqual(common.MUX_EXTENSION,
                     request.ws_extension_processors[1].name())
    self.assertFalse(hasattr(request, 'mux'))
def test_do_handshake_with_permessage_compress_and_mux(self):
    # permessage-compress before mux: mux is accepted and the compression
    # extension is carried inside the mux processor.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = (
        '%s; method=deflate, %s' % (
            common.PERMESSAGE_COMPRESSION_EXTENSION,
            common.MUX_EXTENSION))
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()

    self.assertEqual(1, len(request.ws_extensions))
    self.assertEqual(common.MUX_EXTENSION,
                     request.ws_extensions[0].name())
    self.assertEqual(2, len(request.ws_extension_processors))
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     request.ws_extension_processors[0].name())
    self.assertEqual(common.MUX_EXTENSION,
                     request.ws_extension_processors[1].name())
    self.assertTrue(hasattr(request, 'mux_processor'))
    self.assertTrue(request.mux_processor.is_active())
    mux_extensions = request.mux_processor.extensions()
    self.assertEqual(1, len(mux_extensions))
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     mux_extensions[0].name())
def test_do_handshake_with_mux_and_permessage_compress(self):
    # mux before permessage-compress: mux is rejected and only the
    # compression extension is accepted.
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Extensions'] = (
        '%s, %s; method=deflate' % (
            common.MUX_EXTENSION,
            common.PERMESSAGE_COMPRESSION_EXTENSION))
    request = _create_request(request_def)
    handshaker = _create_handshaker(request)
    handshaker.do_handshake()
    # mux should be rejected.
    self.assertEqual(1, len(request.ws_extensions))
    first_extension = request.ws_extensions[0]
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     first_extension.name())
    self.assertEqual(2, len(request.ws_extension_processors))
    self.assertEqual(common.MUX_EXTENSION,
                     request.ws_extension_processors[0].name())
    self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
                     request.ws_extension_processors[1].name())
    self.assertFalse(hasattr(request, 'mux_processor'))
def test_bad_requests(self):
    # Each entry is (case name, request definition, expected HTTP status
    # of the rejection or None, and whether a HandshakeException -- as
    # opposed to a VersionException -- is expected).
    bad_cases = [
        ('HTTP request',
         RequestDefinition(
             'GET', '/demo',
             {'Host': 'www.google.com',
              'User-Agent':
                  'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
                  ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
                  ' GTB6 GTBA',
              'Accept':
                  'text/html,application/xhtml+xml,application/xml;q=0.9,'
                  '*/*;q=0.8',
              'Accept-Language': 'en-us,en;q=0.5',
              'Accept-Encoding': 'gzip,deflate',
              'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
              'Keep-Alive': '300',
              'Connection': 'keep-alive'}), None, True)]

    request_def = _create_good_request_def()
    request_def.method = 'POST'
    bad_cases.append(('Wrong method', request_def, None, True))

    request_def = _create_good_request_def()
    del request_def.headers['Host']
    bad_cases.append(('Missing Host', request_def, None, True))

    request_def = _create_good_request_def()
    del request_def.headers['Upgrade']
    bad_cases.append(('Missing Upgrade', request_def, None, True))

    request_def = _create_good_request_def()
    request_def.headers['Upgrade'] = 'nonwebsocket'
    bad_cases.append(('Wrong Upgrade', request_def, None, True))

    request_def = _create_good_request_def()
    del request_def.headers['Connection']
    bad_cases.append(('Missing Connection', request_def, None, True))

    request_def = _create_good_request_def()
    request_def.headers['Connection'] = 'Downgrade'
    bad_cases.append(('Wrong Connection', request_def, None, True))

    request_def = _create_good_request_def()
    del request_def.headers['Sec-WebSocket-Key']
    bad_cases.append(('Missing Sec-WebSocket-Key', request_def, 400, True))

    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Key'] = (
        'dGhlIHNhbXBsZSBub25jZQ==garbage')
    bad_cases.append(('Wrong Sec-WebSocket-Key (with garbage on the tail)',
                      request_def, 400, True))

    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Key'] = 'YQ=='  # BASE64 of 'a'
    bad_cases.append(
        ('Wrong Sec-WebSocket-Key (decoded value is not 16 octets long)',
         request_def, 400, True))

    request_def = _create_good_request_def()
    # The last character right before == must be any of A, Q, w and g.
    request_def.headers['Sec-WebSocket-Key'] = (
        'AQIDBAUGBwgJCgsMDQ4PEC==')
    bad_cases.append(
        ('Wrong Sec-WebSocket-Key (padding bits are not zero)',
         request_def, 400, True))

    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Key'] = (
        'dGhlIHNhbXBsZSBub25jZQ==,dGhlIHNhbXBsZSBub25jZQ==')
    bad_cases.append(
        ('Wrong Sec-WebSocket-Key (multiple values)',
         request_def, 400, True))

    request_def = _create_good_request_def()
    del request_def.headers['Sec-WebSocket-Version']
    bad_cases.append(('Missing Sec-WebSocket-Version', request_def, None,
                      True))

    # An unsupported version must raise a VersionException instead of a
    # HandshakeException (hence the False flag below).
    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Version'] = '3'
    bad_cases.append(('Wrong Sec-WebSocket-Version', request_def, None,
                      False))

    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Version'] = '13, 13'
    bad_cases.append(('Wrong Sec-WebSocket-Version (multiple values)',
                      request_def, 400, True))

    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Protocol'] = 'illegal\x09protocol'
    bad_cases.append(('Illegal Sec-WebSocket-Protocol',
                      request_def, 400, True))

    request_def = _create_good_request_def()
    request_def.headers['Sec-WebSocket-Protocol'] = ''
    bad_cases.append(('Empty Sec-WebSocket-Protocol',
                      request_def, 400, True))

    for (case_name, request_def, expected_status,
         expect_handshake_exception) in bad_cases:
        request = _create_request(request_def)
        handshaker = Handshaker(request, mock.MockDispatcher())
        try:
            handshaker.do_handshake()
            self.fail('No exception thrown for \'%s\' case' % case_name)
        except HandshakeException, e:
            self.assertTrue(expect_handshake_exception)
            self.assertEqual(expected_status, e.status)
        except VersionException, e:
            self.assertFalse(expect_handshake_exception)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()


# vi:sts=4 sw=4 et
| mpl-2.0 |
jwlawson/tensorflow | tensorflow/contrib/linear_optimizer/__init__.py | 158 | 1308 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for training linear models.
## This package provides optimizers to train linear models.
@@SdcaModel
@@SparseFeatureColumn
@@SDCAOptimizer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.contrib.linear_optimizer.python.sdca_optimizer import SDCAOptimizer
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| apache-2.0 |
paynejd/ocl_web | ocl_web/apps/collections/test_forms.py | 7 | 4902 | import mock
from libs.ocl import OclApi
from django.test import TestCase
from requests.exceptions import HTTPError
from forms import CollectionCreateForm, CollectionEditForm, CollectionVersionAddForm
from requests.models import Response
class FakeResponse(object):
    """Minimal stand-in for a requests.Response used by these form tests."""
    def __init__(self,data=None):
        # `data` ends up under the 'detail' key of the fake JSON payload.
        self.session = {}
        self.GET = {}
        self.detail = data
        self.status_code = 200
    def json(self):
        return {'detail': self.detail}
    def raise_for_status(self):
        # NOTE(review): raises unconditionally, regardless of status_code --
        # presumably intentional so tests can exercise error paths; confirm.
        raise HTTPError('error', response=self)
class CollectionCreateTest(TestCase):
    """Validation tests for CollectionCreateForm (OclApi.get is mocked)."""

    @mock.patch.object(OclApi, 'get')
    def test_when_all_valid_data_is_provided_then_new_collection_should_be_made(self, mock_get):
        form_data = {
            'short_code': 'col',
            'name': 'col',
            'full_name': 'collection',
            'collection_type': 'Dictionary',
            'public_access': 'Edit',
            'default_locale': 'en',
            'supported_locales': 'en'
        }
        # The form fetches the available locales from the API; stub the
        # response so 'en' validates.
        response = Response()
        response.json = lambda: [{'locale': 'en', 'display_name': 'en'}]
        mock_get.return_value = response
        form = CollectionCreateForm(data=form_data)
        self.assertTrue(form.is_valid())

    @mock.patch.object(OclApi, 'get')
    def test_when_shortName_is_not_provided_then_form_is_not_valid(self, mock_get):
        # 'short_code' (and 'name') omitted -> invalid.
        form_data = {
            'full_name': 'collection',
            'collection_type': 'Dictionary',
            'public_access': 'Edit',
            'default_locale': 'en',
            'supported_locales': 'en'
        }
        form = CollectionCreateForm(data=form_data)
        self.assertFalse(form.is_valid())

    @mock.patch.object(OclApi, 'get')
    def test_when_FullName_is_not_provided_then_form_is_not_valid(self, mock_get):
        # 'full_name' omitted -> invalid.
        form_data = {
            'name': 'col',
            'collection_type': 'Dictionary',
            'public_access': 'Edit',
            'default_locale': 'en',
            'supported_locales': 'en'
        }
        form = CollectionCreateForm(data=form_data)
        self.assertFalse(form.is_valid())

    @mock.patch.object(OclApi, 'get')
    def test_when_defaultLocale_is_not_provided_then_form_is_not_valid(self, mock_get):
        # 'default_locale' omitted -> invalid.
        form_data = {
            'name': 'col',
            'full_name': 'collection',
            'collection_type': 'Dictionary',
            'public_access': 'Edit',
            'supported_locales': 'en'
        }
        form = CollectionCreateForm(data=form_data)
        self.assertFalse(form.is_valid())

    @mock.patch.object(OclApi, 'get')
    def test_when_defaultLocales_is_not_provided_then_form_is_not_valid(self, mock_get):
        # NOTE(review): the method name is misleading -- this data DOES
        # include default_locale/supported_locales, so it duplicates the
        # all-valid-data test above rather than testing a missing field.
        form_data = {
            'short_code': 'col',
            'name': 'col',
            'full_name': 'collection',
            'collection_type': 'Dictionary',
            'public_access': 'Edit',
            'default_locale': 'en',
            'supported_locales': 'en'
        }
        response = Response()
        response.json = lambda: [{'locale': 'en', 'display_name': 'en'}]
        mock_get.return_value = response
        form = CollectionCreateForm(data=form_data)
        self.assertTrue(form.is_valid())
class CollectionEditFormTest(TestCase):
    """Tests that the edit form exposes the expected subset of fields."""

    @mock.patch.object(OclApi, 'get')
    def test_when_edit_form_called_short_name_should_not_be_present(self, mock_get):
        edit_form = CollectionEditForm()
        # The short code is fixed at creation time, so the edit form must
        # not expose it while the name fields stay editable.
        # assertIn/assertNotIn are the idiomatic membership assertions and
        # give far clearer failure messages than routing
        # fields.__contains__() through assertTrue/assertFalse.
        self.assertNotIn('short_code', edit_form.fields)
        self.assertIn('name', edit_form.fields)
        self.assertIn('full_name', edit_form.fields)
class CollectionVersionAddFormTest(TestCase):
    """Validation tests for CollectionVersionAddForm.

    A valid version submission requires 'id', 'description' and
    'previous_version'.
    """

    def test_collectionVersionAddForm_containesOnlyId_formIsInvalid(self):
        # 'id' alone is insufficient.
        form_data = {
            'id': 'v1.0',
        }
        form = CollectionVersionAddForm(data=form_data)
        self.assertFalse(form.is_valid())

    def test_collectionVersionAddForm_containesOnlyDescription_formIsInvalid(self):
        # 'description' alone is insufficient.
        form_data = {
            'description': 'This is version 1.0',
        }
        form = CollectionVersionAddForm(data=form_data)
        self.assertFalse(form.is_valid())

    #TODO Testcases to check for different id inputs remaining
    # NOTE(review): the name below ends in "formIsValid" but the assertion
    # (correctly) expects the form to be INVALID when previous_version is
    # missing -- consider renaming to ..._formIsInvalid.
    def test_collectionVersionAddForm_containesBothIdAndDescription_version_missing_formIsValid(self):
        form_data = {
            'id': 'v1.1',
            'description': 'This is version 1.1',
        }
        form = CollectionVersionAddForm(data=form_data)
        self.assertFalse(form.is_valid())

    def test_collectionVersionAddForm_containesBothIdAndDescription_formIsValid(self):
        # All three required inputs present -> valid.
        form_data = {
            'id': 'v1.1',
            'description': 'This is version 1.1',
            'previous_version': 'HEAD'
        }
        form = CollectionVersionAddForm(data=form_data)
        self.assertTrue(form.is_valid())
| mpl-2.0 |
KSanthanam/rethinkdb | external/v8_3.30.33.16/build/gyp/buildbot/buildbot_run.py | 497 | 5998 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
  """Wrapper around subprocess.call which treats errors as build exceptions.

  All positional and keyword arguments are forwarded verbatim to
  subprocess.call.  On a non-zero exit code the buildbot exception
  annotation is emitted and the script terminates with status 1;
  on success the function returns None.
  """
  retcode = subprocess.call(*args, **kwargs)
  if retcode != 0:
    # A parenthesised single-argument print behaves identically under
    # Python 2 and 3, keeping this helper usable from either runtime.
    print('@@@STEP_EXCEPTION@@@')
    sys.exit(1)
def PrepareCmake():
  """Build CMake 2.8.8 since the version in Precise is 2.8.7.

  Clobbers the checkout when BUILDBOT_CLOBBER=1, then clones and
  bootstraps CMake v2.8.8 into CMAKE_DIR unless it is already present.
  All shell steps go through CallSubProcess, which aborts the build on
  failure.
  """
  if os.environ['BUILDBOT_CLOBBER'] == '1':
    print '@@@BUILD_STEP Clobber CMake checkout@@@'
    shutil.rmtree(CMAKE_DIR)
  # We always build CMake 2.8.8, so no need to do anything
  # if the directory already exists.
  if os.path.isdir(CMAKE_DIR):
    return
  print '@@@BUILD_STEP Initialize CMake checkout@@@'
  os.mkdir(CMAKE_DIR)
  # Git identity/config is needed for the clone on a fresh bot.
  CallSubProcess(['git', 'config', '--global', 'user.name', 'trybot'])
  CallSubProcess(['git', 'config', '--global',
                  'user.email', 'chrome-bot@google.com'])
  CallSubProcess(['git', 'config', '--global', 'color.ui', 'false'])
  print '@@@BUILD_STEP Sync CMake@@@'
  # Shallow, single-branch clone of the pinned release tag.
  CallSubProcess(
      ['git', 'clone',
       '--depth', '1',
       '--single-branch',
       '--branch', 'v2.8.8',
       '--',
       'git://cmake.org/cmake.git',
       CMAKE_DIR],
      cwd=CMAKE_DIR)
  print '@@@BUILD_STEP Build CMake@@@'
  CallSubProcess(
      ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
      cwd=CMAKE_DIR)
  CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
def PrepareAndroidTree():
  """Prepare an Android tree to run 'android' format tests.

  Clobbers the checkout when BUILDBOT_CLOBBER=1, then uses repo to sync
  and build the pinned android-4.2.1_r1 release into ANDROID_DIR unless
  it already exists.
  """
  if os.environ['BUILDBOT_CLOBBER'] == '1':
    print '@@@BUILD_STEP Clobber Android checkout@@@'
    shutil.rmtree(ANDROID_DIR)
  # The release of Android we use is static, so there's no need to do anything
  # if the directory already exists.
  if os.path.isdir(ANDROID_DIR):
    return
  print '@@@BUILD_STEP Initialize Android checkout@@@'
  os.mkdir(ANDROID_DIR)
  # Git identity/config is needed by repo on a fresh bot.
  CallSubProcess(['git', 'config', '--global', 'user.name', 'trybot'])
  CallSubProcess(['git', 'config', '--global',
                  'user.email', 'chrome-bot@google.com'])
  CallSubProcess(['git', 'config', '--global', 'color.ui', 'false'])
  # Pinned manifest branch; unneeded groups excluded to shrink the sync.
  CallSubProcess(
      ['repo', 'init',
       '-u', 'https://android.googlesource.com/platform/manifest',
       '-b', 'android-4.2.1_r1',
       '-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
      cwd=ANDROID_DIR)
  print '@@@BUILD_STEP Sync Android@@@'
  CallSubProcess(['repo', 'sync', '-j4'], cwd=ANDROID_DIR)
  print '@@@BUILD_STEP Build Android@@@'
  # envsetup/lunch only affect the current shell, so build in one bash -c.
  CallSubProcess(
      ['/bin/bash',
       '-c', 'source build/envsetup.sh && lunch full-eng && make -j4'],
      cwd=ANDROID_DIR)
def GypTestFormat(title, format=None, msvs_version=None):
  """Run the gyp tests for a given format, emitting annotator tags.

  See annotator docs at:
    https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations

  Args:
    title: buildbot annotation step name; also used as the format when
      no explicit format is given.
    format: gyp format to test.
    msvs_version: if set, exported to the test run as GYP_MSVS_VERSION.

  Returns:
    0 for success, 1 for failure.
  """
  if not format:
    format = title
  print '@@@BUILD_STEP ' + title + '@@@'
  # Flush so the annotation precedes any subprocess output.
  sys.stdout.flush()
  env = os.environ.copy()
  if msvs_version:
    env['GYP_MSVS_VERSION'] = msvs_version
  command = ' '.join(
      [sys.executable, 'trunk/gyptest.py',
       '--all',
       '--passed',
       '--format', format,
       '--path', CMAKE_BIN_DIR,
       '--chdir', 'trunk'])
  if format == 'android':
    # gyptest needs the environment setup from envsetup/lunch in order to build
    # using the 'android' backend, so this is done in a single shell.
    retcode = subprocess.call(
        ['/bin/bash',
         '-c', 'source build/envsetup.sh && lunch full-eng && cd %s && %s'
         % (ROOT_DIR, command)],
        cwd=ANDROID_DIR, env=env)
  else:
    retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
  if retcode:
    # Emit failure tag, and keep going.
    print '@@@STEP_FAILURE@@@'
    return 1
  return 0
def GypBuild():
  """Top-level bot entry point: clean out/, then run the per-platform
  gyp format tests, accumulating failures into the exit code."""
  # Dump out/ directory.
  print '@@@BUILD_STEP cleanup@@@'
  print 'Removing %s...' % OUT_DIR
  shutil.rmtree(OUT_DIR, ignore_errors=True)
  print 'Done.'
  retcode = 0
  # The Android gyp bot runs on linux so this must be tested first.
  if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
    PrepareAndroidTree()
    retcode += GypTestFormat('android')
  elif sys.platform.startswith('linux'):
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('make')
    PrepareCmake()
    retcode += GypTestFormat('cmake')
  elif sys.platform == 'darwin':
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('xcode')
    retcode += GypTestFormat('make')
  elif sys.platform == 'win32':
    retcode += GypTestFormat('ninja')
    # 64-bit bot additionally exercises the MSVS generators.
    if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
      retcode += GypTestFormat('msvs-2010', format='msvs', msvs_version='2010')
      retcode += GypTestFormat('msvs-2012', format='msvs', msvs_version='2012')
  else:
    raise Exception('Unknown platform')
  if retcode:
    # TODO(bradnelson): once the annotator supports a postscript (section for
    # after the build proper that could be used for cumulative failures),
    # use that instead of this. This isolates the final return value so
    # that it isn't misattributed to the last stage.
    print '@@@BUILD_STEP failures@@@'
    sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| agpl-3.0 |
chris-allan/openmicroscopy | examples/ScreenPlateWell/imagesperwell.py | 4 | 1627 | import sys
import omero
from omero.rtypes import *
from omero_sys_ParametersI import ParametersI # Temporary
# Connect to the OMERO server and grab the query service.
c = omero.client();
s = c.createSession();
q = s.getQueryService();

# HQL that eagerly loads each well with its samples, images and pixels
# so plane counts can be computed without extra round-trips.
LOAD_WELLS = """select w from Well w join fetch w.wellSamples ws
join fetch ws.image i join fetch i.pixels p where w.plate.id = :id"""

filter = omero.sys.Filter();
filter.limit = rint(10)
filter.offset = rint(0)

# Fetch up to 10 plates and pick one at random to demonstrate paging.
plates = q.findAll("Plate", filter)
if len(plates) == 0:
    print "No plates"
    sys.exit(0)
else:
    import random
    example_plate = random.choice(plates)
print "Loading wells for Plate %s (%s)" % (example_plate.getId().getValue(), example_plate.getName().getValue())

# An example of true paging
filter.limit = rint(12)
params = ParametersI()
params.addId(example_plate.getId().getValue())
params.theFilter = filter

offset = 0
while True:
    # Each iteration fetches the next page of (at most 12) wells.
    wells = q.findAllByQuery(LOAD_WELLS, params)
    if len(wells) == 0:
        break
    else:
        offset += len(wells)
        params.theFilter.offset = rint( offset )
    for well in wells:
        id = well.getId().getValue()  # NOTE(review): shadows builtin id()
        row = well.getRow().getValue()
        col = well.getColumn().getValue()
        images = []
        planes = 0
        # Sum Z*T*C planes over every image sampled in this well.
        for ws in well.copyWellSamples():
            img = ws.getImage()
            pix = img.getPixels(0)
            sizeC = pix.sizeC.val
            sizeT = pix.sizeT.val
            sizeZ = pix.sizeZ.val
            images.append( img.getId().getValue() )
            planes += sizeZ*sizeT*sizeC
        print "Well %s (%2sx%2s) contains the images: %s with %s planes" % (id, row, col, images, planes)
| gpl-2.0 |
DARKPOP/external_chromium_org | tools/cygprofile/symbolize.py | 43 | 8785 | #!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Symbolize log file produced by cypgofile instrumentation.
Given a log file and the binary being profiled (e.g. executable, shared
library), the script can produce three different outputs: 1) symbols for the
addresses, 2) function and line numbers for the addresses, or 3) an order file.
"""
import optparse
import os
import string
import subprocess
import sys
def ParseLogLines(log_file_lines):
"""Parse a log file produced by the profiled run of clank.
Args:
log_file_lines: array of lines in log file produced by profiled run
lib_name: library or executable containing symbols
Below is an example of a small log file:
5086e000-52e92000 r-xp 00000000 b3:02 51276 libchromeview.so
secs usecs pid:threadid func
START
1314897086 795828 3587:1074648168 0x509e105c
1314897086 795874 3587:1074648168 0x509e0eb4
1314897086 796326 3587:1074648168 0x509e0e3c
1314897086 796552 3587:1074648168 0x509e07bc
END
Returns:
call_info list with list of tuples of the format (sec, usec, call id,
function address called)
"""
call_lines = []
has_started = False
vm_start = 0
line = log_file_lines[0]
assert("r-xp" in line)
end_index = line.find('-')
vm_start = int(line[:end_index], 16)
for line in log_file_lines[2:]:
# print hex(vm_start)
fields = line.split()
if len(fields) == 4:
call_lines.append(fields)
# Convert strings to int in fields.
call_info = []
for call_line in call_lines:
(sec_timestamp, usec_timestamp) = map(int, call_line[0:2])
callee_id = call_line[2]
addr = int(call_line[3], 16)
if vm_start < addr:
addr -= vm_start
call_info.append((sec_timestamp, usec_timestamp, callee_id, addr))
return call_info
def ParseLibSymbols(lib_file):
  """Get output from running nm and greping for text symbols.

  Args:
    lib_file: the library or executable that contains the profiled code

  Returns:
    list of sorted unique addresses and corresponding size of function
    symbols in lib_file and map of addresses to all symbols at a
    particular address
  """
  # -S prints symbol sizes, -n sorts numerically by address.
  cmd = ['nm', '-S', '-n', lib_file]
  nm_p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
  output = nm_p.communicate()[0]
  nm_lines = output.split('\n')
  nm_symbols = []
  # Keep only text symbols (local t, weak W, global T).
  for nm_line in nm_lines:
    # NOTE(review): the loop variable 'str' shadows the builtin str()
    # within this comprehension -- harmless here, but worth renaming.
    if any(str in nm_line for str in (' t ', ' W ', ' T ')):
      nm_symbols.append(nm_line)
  nm_index = 0
  unique_addrs = []
  address_map = {}
  while nm_index < len(nm_symbols):
    # If the length of the split line is not 4, then it does not contain all the
    # information needed to symbolize (i.e. address, size and symbol name).
    if len(nm_symbols[nm_index].split()) == 4:
      (addr, size) = [int(x, 16) for x in nm_symbols[nm_index].split()[0:2]]
      # Multiple symbols may be at the same address. This is do to aliasing
      # done by the compiler. Since there is no way to be sure which one was
      # called in profiled run, we will symbolize to include all symbol names at
      # a particular address.
      fnames = []
      while (nm_index < len(nm_symbols) and
             addr == int(nm_symbols[nm_index].split()[0], 16)):
        if len(nm_symbols[nm_index].split()) == 4:
          fnames.append(nm_symbols[nm_index].split()[3])
        nm_index += 1
      address_map[addr] = fnames
      unique_addrs.append((addr, size))
    else:
      nm_index += 1
  return (unique_addrs, address_map)
class SymbolNotFoundException(Exception):
  """Raised when a logged address cannot be matched to any nm symbol."""

  def __init__(self, value):
    # Keep the offending address around for diagnostics.
    self.value = value

  def __str__(self):
    # Same as repr(self.value).
    return '%r' % (self.value,)
def BinarySearchAddresses(addr, start, end, arr):
  """Find starting address of a symbol at a particular address.

  The reason we can not directly use the address provided by the log file is
  that the log file may give an address after the start of the symbol. The
  logged address is often one byte after the start. By using this search
  function rather than just subtracting one from the logged address allows
  the logging instrumentation to log any address in a function.

  Args:
    addr: the address being searched for
    start: the starting index for the binary search
    end: the ending index for the binary search
    arr: the list being searched containing tuple of address and size

  Returns:
    the starting address of the symbol at address addr

  Raises:
    SymbolNotFoundException: if address not found. Function expects all
      logged addresses to be found
  """
  if start >= end or start == end - 1:
    # Base case: at most two candidate entries remain. arr[i] is an
    # (address, size) tuple, so test whether addr falls inside either.
    if addr >= arr[start][0] and addr < arr[start][0] + arr[start][1]:
      return arr[start][0]
    elif addr >= arr[end][0] and addr < arr[end][0] + arr[end][1]:
      return arr[end][0]
    else:
      raise SymbolNotFoundException(addr)
  else:
    # Floor division keeps the midpoint an int index under Python 3 too;
    # plain '/' would yield a float there and break list indexing.
    halfway = (start + end) // 2
    (nm_addr, size) = arr[halfway]
    if addr >= nm_addr and addr < nm_addr + size:
      return nm_addr
    elif addr < nm_addr:
      return BinarySearchAddresses(addr, start, halfway-1, arr)
    else:
      # Condition (addr >= nm_addr + size) must be true.
      return BinarySearchAddresses(addr, halfway+1, end, arr)
def FindFunctions(addr, unique_addrs, address_map):
  """Find function symbol names at address addr.

  Resolves addr to the start address of its containing symbol via
  binary search, then returns every symbol name recorded at that
  start (compiler aliases included).
  """
  last_index = len(unique_addrs) - 1
  start_addr = BinarySearchAddresses(addr, 0, last_index, unique_addrs)
  return address_map[start_addr]
def AddrToLine(addr, lib_file):
  """Use addr2line to determine line info of a particular address.

  Args:
    addr: integer address within lib_file.
    lib_file: path of the binary carrying debug info.

  Returns:
    The addr2line output lines (function name, then file:line) joined
    into a single colon-separated string.
  """
  cmd = ['addr2line', '-f', '-e', lib_file, hex(addr)]
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
  output = (p.communicate()[0]).split('\n')
  # Equivalent to the previous manual concatenation loop, but a single
  # linear-time join instead of repeated string building.
  return ':'.join(output)
def main():
  """Write output for profiled run to standard out.

  The format of the output depends on the output type specified as the third
  command line argument. The default output type is to symbolize the addresses
  of the functions called.
  """
  parser = optparse.OptionParser('usage: %prog [options] log_file lib_file')
  parser.add_option('-t', '--outputType', dest='output_type',
                    default='symbolize', type='string',
                    help='lineize or symbolize or orderfile')
  # Option for output type. The log file and lib file arguments are required
  # by the script and therefore are not options.
  (options, args) = parser.parse_args()
  if len(args) != 2:
    parser.error('expected 2 args: log_file lib_file')
  (log_file, lib_file) = args
  output_type = options.output_type

  # List materialized explicitly (ParseLogLines indexes and slices it);
  # equivalent to the old map(string.rstrip, ...) on Python 2 and safe
  # on Python 3 as well.
  log_file_lines = [line.rstrip() for line in open(log_file).readlines()]
  call_info = ParseLogLines(log_file_lines)
  (unique_addrs, address_map) = ParseLibSymbols(lib_file)

  # Check for duplicate addresses in the log file, and print a warning if
  # duplicates are found. The instrumentation that produces the log file
  # should only print the first time a function is entered.
  addr_list = []
  for call in call_info:
    addr = call[3]
    if addr not in addr_list:
      addr_list.append(addr)
    else:
      print('WARNING: Address ' + hex(addr) + ' (line= ' +
            AddrToLine(addr, lib_file) + ') already profiled.')

  for call in call_info:
    if output_type == 'lineize':
      symbol = AddrToLine(call[3], lib_file)
      print(str(call[0]) + ' ' + str(call[1]) + '\t' + str(call[2]) + '\t'
            + symbol)
    elif output_type == 'orderfile':
      try:
        symbols = FindFunctions(call[3], unique_addrs, address_map)
        for symbol in symbols:
          print('.text.' + symbol)
        print('')
      except SymbolNotFoundException:
        # Fixed: report the current call's address; the previous code
        # printed the stale 'addr' left over from the loop above.
        sys.stderr.write('WARNING: Did not find function in binary. addr: '
                         + hex(call[3]) + '\n')
    else:
      try:
        symbols = FindFunctions(call[3], unique_addrs, address_map)
        print(str(call[0]) + ' ' + str(call[1]) + '\t' + str(call[2]) + '\t'
              + symbols[0])
        # Print any aliased symbols on continuation lines.
        first_symbol = True
        for symbol in symbols:
          if not first_symbol:
            print('\t\t\t\t\t' + symbol)
          else:
            first_symbol = False
      except SymbolNotFoundException:
        sys.stderr.write('WARNING: Did not find function in binary. addr: '
                         + hex(call[3]) + '\n')
if __name__ == '__main__':
main()
| bsd-3-clause |
ZachGoldberg/python-quickbooks | quickbooks/objects/tax.py | 3 | 1164 | from six import python_2_unicode_compatible
from .base import QuickbooksBaseObject, Ref, QuickbooksManagedObject
@python_2_unicode_compatible
class TaxLineDetail(QuickbooksBaseObject):
    """Detail of a single tax line (rate reference and amounts)."""

    # Nested QuickBooks objects hydrated from the JSON payload.
    class_dict = {
        "TaxRateRef": Ref
    }

    def __init__(self):
        super(TaxLineDetail, self).__init__()
        # PercentBased toggles percentage vs. flat-amount taxation.
        self.PercentBased = True
        self.TaxPercent = 0
        self.NetAmountTaxable = 0

    def __str__(self):
        return str(self.TaxPercent)
@python_2_unicode_compatible
class TaxLine(QuickbooksBaseObject):
    """One tax line of a transaction, wrapping its TaxLineDetail."""

    # Nested QuickBooks objects hydrated from the JSON payload.
    class_dict = {
        "TaxLineDetail": TaxLineDetail
    }

    def __init__(self):
        super(TaxLine, self).__init__()
        self.Amount = 0
        self.DetailType = ""

    def __str__(self):
        return str(self.Amount)
@python_2_unicode_compatible
class TxnTaxDetail(QuickbooksBaseObject):
    """Transaction-level tax summary: total tax, tax code and lines."""

    # Nested QuickBooks object hydrated from the JSON payload.
    class_dict = {
        "TxnTaxCodeRef": Ref,
    }
    # Repeated elements hydrated into lists from the JSON payload.
    list_dict = {
        "TaxLine": TaxLine
    }

    def __init__(self):
        super(TxnTaxDetail, self).__init__()
        self.TotalTax = 0
        self.TxnTaxCodeRef = None
        self.TaxLine = []

    def __str__(self):
        return str(self.TotalTax)
| mit |
premanandchandrasekar/boto | boto/dynamodb/table.py | 31 | 21808 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.batch import BatchList
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb import exceptions as dynamodb_exceptions
import time
class TableBatchGenerator(object):
    """
    A low-level generator used to page through results from
    batch_get_item operations.

    :ivar consumed_units: An integer that holds the number of
        ConsumedCapacityUnits accumulated thus far for this
        generator.
    """

    def __init__(self, table, keys, attributes_to_get=None,
                 consistent_read=False):
        self.table = table
        # (hash_key, range_key-or-None) tuples still awaiting retrieval.
        self.keys = keys
        self.consumed_units = 0
        self.attributes_to_get = attributes_to_get
        self.consistent_read = consistent_read

    def _queue_unprocessed(self, res):
        # DynamoDB may satisfy only part of a batch; anything listed under
        # UnprocessedKeys for this table is re-queued for a later request.
        if not u'UnprocessedKeys' in res:
            return
        if not self.table.name in res[u'UnprocessedKeys']:
            return
        keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
        for key in keys:
            h = key[u'HashKeyElement']
            r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None
            self.keys.append((h, r))

    def __iter__(self):
        while self.keys:
            # Build the next batch: at most 100 keys are sent per request.
            batch = BatchList(self.table.layer2)
            batch.add_batch(self.table, self.keys[:100],
                            self.attributes_to_get)
            res = batch.submit()
            # parse the results
            # NOTE(review): if the response carries no entry for this
            # table, 'continue' retries with self.keys unchanged -- a
            # persistent empty response would loop forever; confirm.
            if not self.table.name in res[u'Responses']:
                continue
            self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
            for elem in res[u'Responses'][self.table.name][u'Items']:
                yield elem
            # re-queue un processed keys
            self.keys = self.keys[100:]
            self._queue_unprocessed(res)
class Table(object):
"""
An Amazon DynamoDB table.
:ivar name: The name of the table.
:ivar create_time: The date and time that the table was created.
:ivar status: The current status of the table. One of:
'ACTIVE', 'UPDATING', 'DELETING'.
:ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing
the schema defined for the table.
:ivar item_count: The number of items in the table. This value is
set only when the Table object is created or refreshed and
may not reflect the actual count.
:ivar size_bytes: Total size of the specified table, in bytes.
Amazon DynamoDB updates this value approximately every six hours.
Recent changes might not be reflected in this value.
:ivar read_units: The ReadCapacityUnits of the tables
Provisioned Throughput.
:ivar write_units: The WriteCapacityUnits of the tables
Provisioned Throughput.
:ivar schema: The Schema object associated with the table.
"""
    def __init__(self, layer2, response):
        """
        :type layer2: :class:`boto.dynamodb.layer2.Layer2`
        :param layer2: A `Layer2` api object.

        :type response: dict
        :param response: The output of
            `boto.dynamodb.layer1.Layer1.describe_table`.
        """
        self.layer2 = layer2
        # _dict caches the raw table description; update_from_response
        # populates it (and derives the schema) from the API payload.
        self._dict = {}
        self.update_from_response(response)
    @classmethod
    def create_from_schema(cls, layer2, name, schema):
        """Create a Table object.

        If you know the name and schema of your table, you can
        create a ``Table`` object without having to make any
        API calls (normally an API call is made to retrieve
        the schema of a table).

        Example usage::

            table = Table.create_from_schema(
                boto.connect_dynamodb(),
                'tablename',
                Schema.create(hash_key=('keyname', 'N')))

        :type layer2: :class:`boto.dynamodb.layer2.Layer2`
        :param layer2: A ``Layer2`` api object.

        :type name: str
        :param name: The name of the table.

        :type schema: :class:`boto.dynamodb.schema.Schema`
        :param schema: The schema associated with the table.

        :rtype: :class:`boto.dynamodb.table.Table`
        :return: A Table object representing the table.
        """
        # Seed the description with just the name (describe_table shape)
        # and attach the caller-supplied schema instead of fetching one.
        table = cls(layer2, {'Table': {'TableName': name}})
        table._schema = schema
        return table
    def __repr__(self):
        return 'Table(%s)' % self.name

    @property
    def name(self):
        # TableName is always present: the constructor paths seed it.
        return self._dict['TableName']

    @property
    def create_time(self):
        return self._dict.get('CreationDateTime', None)

    @property
    def status(self):
        return self._dict.get('TableStatus', None)

    @property
    def item_count(self):
        return self._dict.get('ItemCount', 0)

    @property
    def size_bytes(self):
        return self._dict.get('TableSizeBytes', 0)

    @property
    def schema(self):
        return self._schema

    @property
    def read_units(self):
        # ProvisionedThroughput may be absent from a minimal description
        # (e.g. a Table built via create_from_schema); report None then.
        try:
            return self._dict['ProvisionedThroughput']['ReadCapacityUnits']
        except KeyError:
            return None

    @property
    def write_units(self):
        try:
            return self._dict['ProvisionedThroughput']['WriteCapacityUnits']
        except KeyError:
            return None
    def update_from_response(self, response):
        """
        Update the state of the Table object based on the response
        data received from Amazon DynamoDB.
        """
        # 'Table' is from a describe_table call.
        if 'Table' in response:
            self._dict.update(response['Table'])
        # 'TableDescription' is from a create_table call.
        elif 'TableDescription' in response:
            self._dict.update(response['TableDescription'])
        # Rebuild the Schema whenever the payload carries one; otherwise
        # any previously attached schema is left untouched.
        if 'KeySchema' in self._dict:
            self._schema = Schema(self._dict['KeySchema'])
    def refresh(self, wait_for_active=False, retry_seconds=5):
        """
        Refresh all of the fields of the Table object by calling
        the underlying DescribeTable request.

        :type wait_for_active: bool
        :param wait_for_active: If True, this command will not return
            until the table status, as returned from Amazon DynamoDB, is
            'ACTIVE'.

        :type retry_seconds: int
        :param retry_seconds: If wait_for_active is True, this
            parameter controls the number of seconds of delay between
            calls to update_table in Amazon DynamoDB. Default is 5 seconds.
        """
        # Poll describe_table until ACTIVE (or just once when not waiting).
        done = False
        while not done:
            response = self.layer2.describe_table(self.name)
            self.update_from_response(response)
            if wait_for_active:
                if self.status == 'ACTIVE':
                    done = True
                else:
                    time.sleep(retry_seconds)
            else:
                done = True
    def update_throughput(self, read_units, write_units):
        """
        Update the ProvisionedThroughput for the Amazon DynamoDB Table.

        :type read_units: int
        :param read_units: The new value for ReadCapacityUnits.

        :type write_units: int
        :param write_units: The new value for WriteCapacityUnits.
        """
        # Delegates to Layer2, which issues the UpdateTable request.
        self.layer2.update_throughput(self, read_units, write_units)

    def delete(self):
        """
        Delete this table and all items in it. After calling this
        the Table objects status attribute will be set to 'DELETING'.
        """
        self.layer2.delete_table(self)
def get_item(self, hash_key, range_key=None,
attributes_to_get=None, consistent_read=False,
item_class=Item):
"""
Retrieve an existing item from the table.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return self.layer2.get_item(self, hash_key, range_key,
attributes_to_get, consistent_read,
item_class)
lookup = get_item
    def has_item(self, hash_key, range_key=None, consistent_read=False):
        """
        Checks the table to see if the Item with the specified ``hash_key``
        exists. This may save a tiny bit of time/bandwidth over a
        straight :py:meth:`get_item` if you have no intention to touch
        the data that is returned, since this method specifically tells
        Amazon not to return anything but the Item's key.

        :type hash_key: int|long|float|str|unicode|Binary
        :param hash_key: The HashKey of the requested item. The
            type of the value must match the type defined in the
            schema for the table.

        :type range_key: int|long|float|str|unicode|Binary
        :param range_key: The optional RangeKey of the requested item.
            The type of the value must match the type defined in the
            schema for the table.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued. Otherwise, an eventually consistent
            request is issued.

        :rtype: bool
        :returns: ``True`` if the Item exists, ``False`` if not.
        """
        try:
            # Attempt to get the key. If it can't be found, it'll raise
            # an exception.
            self.get_item(hash_key, range_key=range_key,
                          # This minimizes the size of the response body.
                          # NOTE(review): this passes the hash key *value*
                          # where an attribute *name* is expected; the
                          # existence check still works, but the response
                          # is only minimized if the value happens to
                          # equal the attribute name -- presumably
                          # self._schema.hash_key_name was intended;
                          # confirm before changing.
                          attributes_to_get=[hash_key],
                          consistent_read=consistent_read)
        except dynamodb_exceptions.DynamoDBKeyNotFoundError:
            # Key doesn't exist.
            return False
        return True
def new_item(self, hash_key=None, range_key=None, attrs=None,
item_class=Item):
"""
Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB.
This method has explicit (but optional) parameters for
the hash_key and range_key values of the item. You can use
these explicit parameters when calling the method, such as::
>>> my_item = my_table.new_item(hash_key='a', range_key=1,
attrs={'key1': 'val1', 'key2': 'val2'})
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
Or, if you prefer, you can simply put the hash_key and range_key
in the attrs dictionary itself, like this::
>>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'}
>>> my_item = my_table.new_item(attrs=attrs)
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
The effect is the same.
.. note:
The explicit parameters take priority over the values in
the attrs dict. So, if you have a hash_key or range_key
in the attrs dict and you also supply either or both using
the explicit parameters, the values in the attrs will be
ignored.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the new item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the new item.
The type of the value must match the type defined in the
schema for the table.
:type attrs: dict
:param attrs: A dictionary of key value pairs used to
populate the new item.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return item_class(self, hash_key, range_key, attrs)
def query(self, hash_key, *args, **kw):
    """
    Perform a query on the table.

    :type hash_key: int|long|float|str|unicode|Binary
    :param hash_key: The HashKey of the requested item.  The
        type of the value must match the type defined in the
        schema for the table.

    :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
    :param range_key_condition: A Condition object.
        Condition object can be one of the following types:

        EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN

        The only condition which expects or will accept two
        values is 'BETWEEN', otherwise a single value should
        be passed to the Condition constructor.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.

    :type consistent_read: bool
    :param consistent_read: If True, a consistent read
        request is issued.  Otherwise, an eventually consistent
        request is issued.

    :type scan_index_forward: bool
    :param scan_index_forward: Specified forward or backward
        traversal of the index.  Default is forward (True).

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Query operation, even if the
        operation has no matching items for the assigned filter.
        If count is True, the actual items are not returned and
        the count is accessible as the ``count`` attribute of
        the returned object.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`
    """
    # All the real work is delegated to the layer2 object that created
    # this table; only the table reference and hash key are bound here.
    return self.layer2.query(self, hash_key, *args, **kw)
def scan(self, *args, **kw):
    """
    Scan through this table, this is a very long
    and expensive operation, and should be avoided if
    at all possible.

    :type scan_filter: A dict
    :param scan_filter: A dictionary where the key is the
        attribute name and the value is a
        :class:`boto.dynamodb.condition.Condition` object.
        Valid Condition objects include:

         * EQ - equal (1)
         * NE - not equal (1)
         * LE - less than or equal (1)
         * LT - less than (1)
         * GE - greater than or equal (1)
         * GT - greater than (1)
         * NOT_NULL - attribute exists (0, use None)
         * NULL - attribute does not exist (0, use None)
         * CONTAINS - substring or value in list (1)
         * NOT_CONTAINS - absence of substring or value in list (1)
         * BEGINS_WITH - substring prefix (1)
         * IN - exact match in list (N)
         * BETWEEN - >= first value, <= second value (2)

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.
        If count is True, the actual items are not returned and
        the count is accessible as the ``count`` attribute of
        the returned object.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`

    :return: A TableGenerator (generator) object which will iterate
        over all results
    :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
    """
    # Delegate to layer2, binding only the table reference.
    return self.layer2.scan(self, *args, **kw)
def batch_get_item(self, keys, attributes_to_get=None):
    """
    Return a set of attributes for a multiple items from a single table
    using their primary keys. This abstraction removes the 100 Items per
    batch limitations as well as the "UnprocessedKeys" logic.

    :type keys: list
    :param keys: A list of scalar or tuple values. Each element in the
        list represents one Item to retrieve. If the schema for the
        table has both a HashKey and a RangeKey, each element in the
        list should be a tuple consisting of (hash_key, range_key). If
        the schema for the table contains only a HashKey, each element
        in the list should be a scalar value of the appropriate type
        for the table schema. NOTE: The maximum number of items that
        can be retrieved for a single operation is 100. Also, the
        number of items retrieved is constrained by a 1 MB size limit.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :return: A TableBatchGenerator (generator) object which will
        iterate over all results
    :rtype: :class:`boto.dynamodb.table.TableBatchGenerator`
    """
    # The generator re-issues requests for any unprocessed keys, so the
    # caller never sees the service's per-batch limits.
    return TableBatchGenerator(self, keys, attributes_to_get)
| mit |
minsis/hashgen | libs/hashinglib.py | 1 | 1157 | import hashlib
def read_file(file_path, hashed_file):
    """Feed the contents of *file_path* into *hashed_file* in 1 MiB chunks.

    :param file_path: path of the file to digest
    :param hashed_file: a hashlib hash object to update in place
    :return: the hex digest of the fully-updated hash object
    """
    with open(file_path, "rb") as stream:
        # iter() with a sentinel keeps reading fixed-size chunks until EOF
        for chunk in iter(lambda: stream.read(1048576), b""):
            hashed_file.update(chunk)
    return hashed_file.hexdigest()
def run_hash(file_path, algo_hash):
    """Digest *file_path* with the named algorithm and return the hex digest.

    :param file_path: path of the file to digest
    :param algo_hash: algorithm name accepted by ``hashlib.new``
    """
    digest = hashlib.new(algo_hash)
    return read_file(file_path, digest)
def convert_to_dict(hash_list):
    """Map each name in *hash_list* to the module-global object of that name.

    Raises ``KeyError`` if a name has no matching module-level binding.
    """
    return {name: globals()[name] for name in hash_list}
def compare(compare_hash, hash_value):
    """Print whether the two hash strings are equal."""
    if compare_hash != hash_value:
        print("Your hash values do not match")
    else:
        print("Your hash values match")
def get_hashes():
    """Return the sorted list of available algorithm names.

    Upper-case aliases of the guaranteed built-in algorithms are dropped
    so each built-in appears only once (in its lower-case spelling).
    """
    available = set(hashlib.algorithms_available)
    builtin_upper = {name.upper() for name in hashlib.algorithms_guaranteed}
    return sorted(available - builtin_upper)
def format_hash(all_hash):
    """Return a copy of *all_hash* with every name lower-cased."""
    return [name.lower() for name in all_hash]
| gpl-3.0 |
sYnfo/samba-1 | third_party/waf/wafadmin/Tools/ccroot.py | 12 | 19214 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"base for all c/c++ programs and libraries"
import os, sys, re
import TaskGen, Task, Utils, preproc, Logs, Build, Options
from Logs import error, debug, warn
from Utils import md5
from TaskGen import taskgen, after, before, feature
from Constants import *
from Configure import conftest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import config_c # <- necessary for the configuration, do not touch
USE_TOP_LEVEL = False
def get_cc_version(conf, cc, gcc=False, icc=False):
    """Run the compiler's preprocessor on empty input and parse the
    predefined macros it reports.

    For gcc-compatible compilers this detects and stores
    ``conf.env.DEST_OS``, ``DEST_BINFMT`` and ``DEST_CPU``, plus
    ``CC_VERSION``.

    :param conf: the waf configuration context
    :param cc: the compiler command line, as a list of strings
    :param gcc: when True, fail unless the compiler really is gcc
    :param icc: when True, fail unless the compiler is the Intel compiler
    :return: a dict mapping predefined macro names to their values
    """
    cmd = cc + ['-dM', '-E', '-']
    try:
        p = Utils.pproc.Popen(cmd, stdin=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
        p.stdin.write('\n')
        out = p.communicate()[0]
    except Exception:
        # was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt propagate
        conf.fatal('could not determine the compiler version %r' % cmd)

    # PY3K: do not touch
    out = str(out)

    if gcc:
        if out.find('__INTEL_COMPILER') >= 0:
            conf.fatal('The intel compiler pretends to be gcc')
        if out.find('__GNUC__') < 0:
            conf.fatal('Could not determine the compiler type')

    if icc and out.find('__INTEL_COMPILER') < 0:
        conf.fatal('Not icc/icpc')

    k = {}
    if icc or gcc:
        out = out.split('\n')
        import shlex

        for line in out:
            lst = shlex.split(line)
            if len(lst) > 2:
                key = lst[1]
                val = lst[2]
                k[key] = val

        def isD(var):
            # true when the macro is defined at all
            return var in k

        # Some documentation is available at http://predef.sourceforge.net
        # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns.
        mp1 = {
            '__linux__'   : 'linux',
            '__GNU__'     : 'gnu',
            '__FreeBSD__' : 'freebsd',
            '__NetBSD__'  : 'netbsd',
            '__OpenBSD__' : 'openbsd',
            '__sun'       : 'sunos',
            '__hpux'      : 'hpux',
            '__sgi'       : 'irix',
            '_AIX'        : 'aix',
            '__CYGWIN__'  : 'cygwin',
            '__MSYS__'    : 'msys',
            '_UWIN'       : 'uwin',
            '_WIN64'      : 'win32',
            '_WIN32'      : 'win32',
            '__POWERPC__' : 'powerpc',
        }

        for i in mp1:
            if isD(i):
                conf.env.DEST_OS = mp1[i]
                break
        else:
            # for-else: none of the explicit macros matched
            if isD('__APPLE__') and isD('__MACH__'):
                conf.env.DEST_OS = 'darwin'
            elif isD('__unix__'): # unix must be tested last as it's a generic fallback
                conf.env.DEST_OS = 'generic'

        if isD('__ELF__'):
            conf.env.DEST_BINFMT = 'elf'
        elif isD('__WINNT__') or isD('__CYGWIN__'):
            conf.env.DEST_BINFMT = 'pe'
        elif isD('__APPLE__'):
            conf.env.DEST_BINFMT = 'mac-o'

        mp2 = {
            '__x86_64__'  : 'x86_64',
            '__i386__'    : 'x86',
            '__ia64__'    : 'ia',
            '__mips__'    : 'mips',
            '__sparc__'   : 'sparc',
            '__alpha__'   : 'alpha',
            '__arm__'     : 'arm',
            '__hppa__'    : 'hppa',
            '__powerpc__' : 'powerpc',
        }
        for i in mp2:
            if isD(i):
                conf.env.DEST_CPU = mp2[i]
                break

        debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')]))
        conf.env['CC_VERSION'] = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__'])
    return k
class DEBUG_LEVELS:
    """Will disappear in waf 1.6"""
    # Symbolic names for the historical build variants; ALL collects them
    # so user-supplied values can be validated.
    ULTRADEBUG = "ultradebug"
    DEBUG = "debug"
    RELEASE = "release"
    OPTIMIZED = "optimized"
    CUSTOM = "custom"

    ALL = [ULTRADEBUG, DEBUG, RELEASE, OPTIMIZED, CUSTOM]
def scan(self):
    "look for .h the .cpp need"
    debug('ccroot: _scan_preprocessor(self, node, env, path_lst)')

    # TODO waf 1.6 - assume the default input has exactly one file
    if len(self.inputs) == 1:
        node = self.inputs[0]
        (nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS'])
        if Logs.verbose:
            debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names)
        return (nodes, names)

    # several inputs: merge the dependency lists, de-duplicating nodes by
    # identity and unresolved names by value
    all_nodes = []
    all_names = []
    seen = set()
    for node in self.inputs:
        (nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS'])
        if Logs.verbose:
            debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names)
        for x in nodes:
            if id(x) in seen: continue
            seen.add(id(x))
            all_nodes.append(x)
        for x in names:
            if not x in all_names:
                all_names.append(x)
    return (all_nodes, all_names)
class ccroot_abstract(TaskGen.task_gen):
    "Parent class for programs and libraries in languages c, c++ and moc (Qt)"
    def __init__(self, *k, **kw):
        # COMPAT remove in waf 1.6 TODO
        # normalize legacy positional feature names: 'shlib' -> 'cshlib', etc.
        if len(k) > 1:
            k = list(k)
            if k[1][0] != 'c':
                k[1] = 'c' + k[1]
        TaskGen.task_gen.__init__(self, *k, **kw)
def get_target_name(self):
    # pick the file-name pattern ('program', 'shlib' or 'staticlib')
    # matching the task generator's features
    tp = 'program'
    for x in self.features:
        if x in ['cshlib', 'cstaticlib']:
            tp = x.lstrip('c')

    pattern = self.env[tp + '_PATTERN']
    if not pattern: pattern = '%s'

    dir, name = os.path.split(self.target)

    if 'cshlib' in self.features and getattr(self, 'vnum', None):
        nums = self.vnum.split('.')
        if self.env.DEST_BINFMT == 'pe':
            # include the version in the dll file name,
            # the import lib file name stays unversioned.
            name = name + '-' + nums[0]
        elif self.env.DEST_OS == 'openbsd':
            # openbsd encodes major[.minor] in the pattern itself
            pattern = '%s.%s' % (pattern, nums[0])
            if len(nums) >= 2:
                pattern += '.%s' % nums[1]

    return os.path.join(dir, pattern % name)
@feature('cc', 'cxx')
@before('apply_core')
def default_cc(self):
    """compiled_tasks attribute must be set before the '.c->.o' tasks can be created"""
    # give the task generator its default attributes if the user did not set them
    Utils.def_attrs(self,
        includes = '',
        defines= '',
        rpaths = '',
        uselib = '',
        uselib_local = '',
        add_objects = '',
        p_flag_vars = [],
        p_type_vars = [],
        compiled_tasks = [],
        link_task = None)

    # The only thing we need for cross-compilation is DEST_BINFMT.
    # At some point, we may reach a case where DEST_BINFMT is not enough, but for now it's sufficient.
    # Currently, cross-compilation is auto-detected only for the gnu and intel compilers.
    if not self.env.DEST_BINFMT:
        # Infer the binary format from the os name.
        self.env.DEST_BINFMT = Utils.unversioned_sys_platform_to_binary_format(
            self.env.DEST_OS or Utils.unversioned_sys_platform())

    if not self.env.BINDIR: self.env.BINDIR = Utils.subst_vars('${PREFIX}/bin', self.env)
    if not self.env.LIBDIR: self.env.LIBDIR = Utils.subst_vars('${PREFIX}/lib${LIB_EXT}', self.env)
@feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib')
def apply_verif(self):
    """no particular order, used for diagnostic"""
    # a link-producing task generator must have at least one source of
    # object files, and a target name
    if not (self.source or getattr(self, 'add_objects', None) or getattr(self, 'uselib_local', None) or getattr(self, 'obj_files', None)):
        raise Utils.WafError('no source files specified for %s' % self)
    if not self.target:
        raise Utils.WafError('no target for %s' % self)
# TODO reference the d programs, shlibs in d.py, not here

@feature('cprogram', 'dprogram')
@after('default_cc')
@before('apply_core')
def vars_target_cprogram(self):
    # programs are installed in BINDIR, executable by everybody
    self.default_install_path = self.env.BINDIR
    self.default_chmod = O755
@after('default_cc')
@feature('cshlib', 'dshlib')
@before('apply_core')
def vars_target_cshlib(self):
    if self.env.DEST_BINFMT == 'pe':
        # set execute bit on libs to avoid 'permission denied' (issue 283)
        self.default_chmod = O755
        # on windows-like platforms the dll goes next to the programs
        self.default_install_path = self.env.BINDIR
    else:
        self.default_install_path = self.env.LIBDIR
@feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib')
@after('apply_link', 'vars_target_cprogram', 'vars_target_cshlib')
def default_link_install(self):
    """you may kill this method to inject your own installation for the first element
    any other install should only process its own nodes and not those from the others"""
    if self.install_path:
        self.bld.install_files(self.install_path, self.link_task.outputs[0], env=self.env, chmod=self.chmod)
@feature('cc', 'cxx')
@after('apply_type_vars', 'apply_lib_vars', 'apply_core')
def apply_incpaths(self):
    """used by the scanner
    after processing the uselib for CPPPATH
    after apply_core because some processing may add include paths
    """
    lst = []
    # TODO move the uselib processing out of here
    for lib in self.to_list(self.uselib):
        for path in self.env['CPPPATH_' + lib]:
            if not path in lst:
                lst.append(path)
    if preproc.go_absolute:
        for path in preproc.standard_includes:
            if not path in lst:
                lst.append(path)

    for path in self.to_list(self.includes):
        if not path in lst:
            if preproc.go_absolute or not os.path.isabs(path):
                lst.append(path)
            else:
                # absolute paths are passed straight to the compiler
                # unless go_absolute is set
                self.env.prepend_value('CPPPATH', path)

    for path in lst:
        node = None
        if os.path.isabs(path):
            if preproc.go_absolute:
                node = self.bld.root.find_dir(path)
        elif path[0] == '#':
            # '#' anchors the path at the project top-level directory
            node = self.bld.srcnode
            if len(path) > 1:
                node = node.find_dir(path[1:])
        else:
            node = self.path.find_dir(path)

        if node:
            self.env.append_value('INC_PATHS', node)

    # TODO WAF 1.6
    if USE_TOP_LEVEL:
        self.env.append_value('INC_PATHS', self.bld.srcnode)
@feature('cc', 'cxx')
@after('init_cc', 'init_cxx')
@before('apply_lib_vars')
def apply_type_vars(self):
    """before apply_lib_vars because we modify uselib
    after init_cc and init_cxx because we need p_type_vars
    """
    for x in self.features:
        if not x in ['cprogram', 'cstaticlib', 'cshlib']:
            continue
        x = x.lstrip('c')

        # if the type defines uselib to add, add them
        st = self.env[x + '_USELIB']
        if st: self.uselib = self.uselib + ' ' + st

        # each compiler defines variables like 'shlib_CXXFLAGS', 'shlib_LINKFLAGS', etc
        # so when we make a task generator of the type shlib, CXXFLAGS are modified accordingly
        for var in self.p_type_vars:
            compvar = '%s_%s' % (x, var)
            #print compvar
            value = self.env[compvar]
            if value: self.env.append_value(var, value)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_core')
def apply_link(self):
    """executes after apply_core for collecting 'compiled_tasks'
    use a custom linker if specified (self.link='name-of-custom-link-task')"""
    link = getattr(self, 'link', None)
    if not link:
        # choose the default linker task by feature
        if 'cstaticlib' in self.features: link = 'static_link'
        elif 'cxx' in self.features: link = 'cxx_link'
        else: link = 'cc_link'

    tsk = self.create_task(link)
    # link all the object files produced by the compilation tasks
    outputs = [t.outputs[0] for t in self.compiled_tasks]
    tsk.set_inputs(outputs)
    tsk.set_outputs(self.path.find_or_declare(get_target_name(self)))

    self.link_task = tsk
@feature('cc', 'cxx')
@after('apply_link', 'init_cc', 'init_cxx', 'apply_core')
def apply_lib_vars(self):
    """after apply_link because of 'link_task'
    after default_cc because of the attribute 'uselib'"""

    # after 'apply_core' in case if 'cc' if there is no link

    env = self.env

    # 1. the case of the libs defined in the project (visit ancestors first)
    # the ancestors external libraries (uselib) will be prepended
    self.uselib = self.to_list(self.uselib)
    names = self.to_list(self.uselib_local)

    seen = set([])
    tmp = Utils.deque(names) # consume a copy of the list of names
    while tmp:
        lib_name = tmp.popleft()
        # visit dependencies only once
        if lib_name in seen:
            continue

        y = self.name_to_obj(lib_name)
        if not y:
            raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
        y.post()
        seen.add(lib_name)

        # object has ancestors to process (shared libraries): add them to the end of the list
        if getattr(y, 'uselib_local', None):
            lst = y.to_list(y.uselib_local)
            if 'cshlib' in y.features or 'cprogram' in y.features:
                # static libs linked into a shlib/program are not propagated
                lst = [x for x in lst if not 'cstaticlib' in self.name_to_obj(x).features]
            tmp.extend(lst)

        # link task and flags
        if getattr(y, 'link_task', None):

            link_name = y.target[y.target.rfind(os.sep) + 1:]
            if 'cstaticlib' in y.features:
                env.append_value('STATICLIB', link_name)
            elif 'cshlib' in y.features or 'cprogram' in y.features:
                # WARNING some linkers can link against programs
                env.append_value('LIB', link_name)

            # the order
            self.link_task.set_run_after(y.link_task)

            # for the recompilation
            dep_nodes = getattr(self.link_task, 'dep_nodes', [])
            self.link_task.dep_nodes = dep_nodes + y.link_task.outputs

            # add the link path too
            tmp_path = y.link_task.outputs[0].parent.bldpath(self.env)
            if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', tmp_path)

        # add ancestors uselib too - but only propagate those that have no staticlib
        for v in self.to_list(y.uselib):
            if not env['STATICLIB_' + v]:
                if not v in self.uselib:
                    self.uselib.insert(0, v)

        # if the library task generator provides 'export_incdirs', add to the include path
        # the export_incdirs must be a list of paths relative to the other library
        if getattr(y, 'export_incdirs', None):
            for x in self.to_list(y.export_incdirs):
                node = y.path.find_dir(x)
                if not node:
                    raise Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x))
                self.env.append_unique('INC_PATHS', node)

    # 2. the case of the libs defined outside
    for x in self.uselib:
        for v in self.p_flag_vars:
            val = self.env[v + '_' + x]
            if val: self.env.append_value(v, val)
@feature('cprogram', 'cstaticlib', 'cshlib')
@after('init_cc', 'init_cxx', 'apply_link')
def apply_objdeps(self):
    "add the .o files produced by some other object files in the same manner as uselib_local"
    if not getattr(self, 'add_objects', None): return

    seen = []
    names = self.to_list(self.add_objects)
    while names:
        x = names[0]

        # visit dependencies only once
        if x in seen:
            names = names[1:]
            continue

        # object does not exist ?
        y = self.name_to_obj(x)
        if not y:
            raise Utils.WafError('object %r was not found in uselib_local (required by add_objects %r)' % (x, self.name))

        # object has ancestors to process first ? update the list of names
        if getattr(y, 'add_objects', None):
            added = 0
            lst = y.to_list(y.add_objects)
            lst.reverse()
            for u in lst:
                if u in seen: continue
                added = 1
                names = [u]+names
            if added: continue # list of names modified, loop

        # safe to process the current object
        y.post()
        seen.append(x)

        for t in y.compiled_tasks:
            self.link_task.inputs.extend(t.outputs)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_lib_vars')
def apply_obj_vars(self):
    """after apply_lib_vars for uselib"""
    v = self.env
    # format-string templates for the various link flags, e.g. '-l%s'
    lib_st = v['LIB_ST']
    staticlib_st = v['STATICLIB_ST']
    libpath_st = v['LIBPATH_ST']
    staticlibpath_st = v['STATICLIBPATH_ST']
    rpath_st = v['RPATH_ST']

    app = v.append_unique

    if v['FULLSTATIC']:
        v.append_value('LINKFLAGS', v['FULLSTATIC_MARKER'])

    for i in v['RPATH']:
        if i and rpath_st:
            app('LINKFLAGS', rpath_st % i)

    for i in v['LIBPATH']:
        app('LINKFLAGS', libpath_st % i)
        app('LINKFLAGS', staticlibpath_st % i)

    if v['STATICLIB']:
        v.append_value('LINKFLAGS', v['STATICLIB_MARKER'])
        k = [(staticlib_st % i) for i in v['STATICLIB']]
        app('LINKFLAGS', k)

    # fully static binaries ?
    if not v['FULLSTATIC']:
        if v['STATICLIB'] or v['LIB']:
            v.append_value('LINKFLAGS', v['SHLIB_MARKER'])

    app('LINKFLAGS', [lib_st % i for i in v['LIB']])
@after('apply_link')
def process_obj_files(self):
    # feed the object files registered via add_obj_file into the link task
    if not hasattr(self, 'obj_files'): return
    for x in self.obj_files:
        node = self.path.find_resource(x)
        self.link_task.inputs.append(node)
@taskgen
def add_obj_file(self, file):
    """Small example on how to link object files as if they were source
    obj = bld.create_obj('cc')
    obj.add_obj_file('foo.o')"""
    if not hasattr(self, 'obj_files'): self.obj_files = []
    # ensure the consuming method runs exactly once for this generator
    if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files')
    self.obj_files.append(file)
# map task generator attribute names (singular, lower-case) to the env
# variables they feed; consumed by add_extra_flags below
c_attrs = {
    'cxxflag' : 'CXXFLAGS',
    'cflag' : 'CCFLAGS',
    'ccflag' : 'CCFLAGS',
    'linkflag' : 'LINKFLAGS',
    'ldflag' : 'LINKFLAGS',
    'lib' : 'LIB',
    'libpath' : 'LIBPATH',
    'staticlib': 'STATICLIB',
    'staticlibpath': 'STATICLIBPATH',
    'rpath' : 'RPATH',
    'framework' : 'FRAMEWORK',
    'frameworkpath' : 'FRAMEWORKPATH'
}
@feature('cc', 'cxx')
@before('init_cxx', 'init_cc')
@before('apply_lib_vars', 'apply_obj_vars', 'apply_incpaths', 'init_cc')
def add_extra_flags(self):
    """case and plural insensitive
    before apply_obj_vars for processing the library attributes
    """
    for x in self.__dict__.keys():
        y = x.lower()
        # strip a trailing 's' so 'libs' and 'lib' are both recognized
        if y[-1] == 's':
            y = y[:-1]
        if c_attrs.get(y, None):
            self.env.append_unique(c_attrs[y], getattr(self, x))
# ============ the code above must not know anything about import libs ==========

@feature('cshlib')
@after('apply_link', 'default_cc')
@before('apply_lib_vars', 'apply_objdeps', 'default_link_install')
def apply_implib(self):
    """On mswindows, handle dlls and their import libs
    the .dll.a is the import lib and it is required for linking so it is installed too
    """
    if not self.env.DEST_BINFMT == 'pe':
        return

    # this method replaces the default installation entirely
    self.meths.remove('default_link_install')

    bindir = self.install_path
    if not bindir: return

    # install the dll in the bin dir
    dll = self.link_task.outputs[0]
    self.bld.install_files(bindir, dll, self.env, self.chmod)

    # add linker flags to generate the import lib
    implib = self.env['implib_PATTERN'] % os.path.split(self.target)[1]

    implib = dll.parent.find_or_declare(implib)
    self.link_task.outputs.append(implib)
    self.bld.install_as('${LIBDIR}/%s' % implib.name, implib, self.env)

    self.env.append_value('LINKFLAGS', (self.env['IMPLIB_ST'] % implib.bldpath(self.env)).split())
# ============ the code above must not know anything about vnum processing on unix platforms =========

@feature('cshlib')
@after('apply_link')
@before('apply_lib_vars', 'default_link_install')
def apply_vnum(self):
    """
    libfoo.so is installed as libfoo.so.1.2.3
    """
    if not getattr(self, 'vnum', '') or not 'cshlib' in self.features or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
        return

    # this method takes over the installation of the link output
    self.meths.remove('default_link_install')

    link = self.link_task
    nums = self.vnum.split('.')
    node = link.outputs[0]

    libname = node.name
    if libname.endswith('.dylib'):
        name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
        name2 = libname.replace('.dylib', '.%s.dylib' % nums[0])
    else:
        # name3 carries the full version, name2 only the major number
        name3 = libname + '.' + self.vnum
        name2 = libname + '.' + nums[0]

    if self.env.SONAME_ST:
        v = self.env.SONAME_ST % name2
        self.env.append_value('LINKFLAGS', v.split())

    bld = self.bld
    nums = self.vnum.split('.')

    path = self.install_path
    if not path: return

    if self.env.DEST_OS == 'openbsd':
        libname = self.link_task.outputs[0].name
        bld.install_as('%s%s%s' % (path, os.sep, libname), node, env=self.env)
    else:
        # install the fully-versioned file, then symlink the shorter names to it
        bld.install_as(path + os.sep + name3, node, env=self.env)
        bld.symlink_as(path + os.sep + name2, name3)
        bld.symlink_as(path + os.sep + libname, name3)

    # the following task is just to enable execution from the build dir :-/
    if self.env.DEST_OS != 'openbsd':
        self.create_task('vnum', node, [node.parent.find_or_declare(name2), node.parent.find_or_declare(name3)])
def exec_vnum_link(self):
    # (re)create the versioned symlinks next to the build output
    for x in self.outputs:
        path = x.abspath(self.env)
        try:
            os.remove(path)
        except OSError:
            # the link may not exist yet; that is fine
            pass

        try:
            os.symlink(self.inputs[0].name, path)
        except OSError:
            return 1

cls = Task.task_type_from_func('vnum', func=exec_vnum_link, ext_in='.bin', color='CYAN')
cls.quiet = 1
# ============ the --as-needed flag should be added during the configuration, not at runtime =========

@conftest
def add_as_needed(conf):
    # only meaningful for ELF output produced by the gnu toolchain
    if conf.env.DEST_BINFMT == 'elf' and 'gcc' in (conf.env.CXX_NAME, conf.env.CC_NAME):
        conf.env.append_unique('LINKFLAGS', '--as-needed')
| gpl-3.0 |
geodrinx/gearthview | ext-libs/twisted/runner/test/test_procmon.py | 49 | 16794 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.runner.procmon}.
"""
from twisted.trial import unittest
from twisted.runner.procmon import LoggingProtocol, ProcessMonitor
from twisted.internet.error import (ProcessDone, ProcessTerminated,
ProcessExitedAlready)
from twisted.internet.task import Clock
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactor
class DummyProcess(object):
    """
    An incomplete and fake L{IProcessTransport} implementation for testing how
    L{ProcessMonitor} behaves when its monitored processes exit.

    @ivar _terminationDelay: the delay in seconds after which the DummyProcess
        will appear to exit when it receives a TERM signal
    """
    pid = 1
    proto = None

    _terminationDelay = 1

    def __init__(self, reactor, executable, args, environment, path,
                 proto, uid=None, gid=None, usePTY=0, childFDs=None):
        self.proto = proto

        # record every spawn argument so tests can inspect them later
        self._reactor = reactor
        self._executable = executable
        self._args = args
        self._environment = environment
        self._path = path
        self._uid = uid
        self._gid = gid
        self._usePTY = usePTY
        self._childFDs = childFDs

    def signalProcess(self, signalID):
        """
        A partial implementation of signalProcess which can only handle TERM and
        KILL signals.
         - When a TERM signal is given, the dummy process will appear to exit
           after L{DummyProcess._terminationDelay} seconds with exit code 0
         - When a KILL signal is given, the dummy process will appear to exit
           immediately with exit code 1.

        @param signalID: The signal name or number to be issued to the process.
        @type signalID: C{str}
        """
        # (delay before exit, exit status) per supported signal
        params = {
            "TERM": (self._terminationDelay, 0),
            "KILL": (0, 1)
        }

        if self.pid is None:
            raise ProcessExitedAlready()

        if signalID in params:
            delay, status = params[signalID]
            self._signalHandler = self._reactor.callLater(
                delay, self.processEnded, status)

    def processEnded(self, status):
        """
        Deliver the process ended event to C{self.proto}.
        """
        self.pid = None
        # exit status 0 maps to a clean exit, anything else to termination
        statusMap = {
            0: ProcessDone,
            1: ProcessTerminated,
        }
        self.proto.processEnded(Failure(statusMap[status](status)))
class DummyProcessReactor(MemoryReactor, Clock):
    """
    @ivar spawnedProcesses: a list that keeps track of the fake process
        instances built by C{spawnProcess}.
    @type spawnedProcesses: C{list}
    """
    def __init__(self):
        MemoryReactor.__init__(self)
        Clock.__init__(self)

        self.spawnedProcesses = []

    def spawnProcess(self, processProtocol, executable, args=(), env={},
                     path=None, uid=None, gid=None, usePTY=0,
                     childFDs=None):
        """
        Fake L{reactor.spawnProcess}, that logs all the process
        arguments and returns a L{DummyProcess}.
        """
        proc = DummyProcess(self, executable, args, env, path,
                            processProtocol, uid, gid, usePTY, childFDs)
        processProtocol.makeConnection(proc)
        self.spawnedProcesses.append(proc)
        return proc
class ProcmonTests(unittest.TestCase):
"""
Tests for L{ProcessMonitor}.
"""
    def setUp(self):
        """
        Create an L{ProcessMonitor} wrapped around a fake reactor.
        """
        self.reactor = DummyProcessReactor()
        self.pm = ProcessMonitor(reactor=self.reactor)
        # short, deterministic timings so the tests can drive the fake clock
        self.pm.minRestartDelay = 2
        self.pm.maxRestartDelay = 10
        self.pm.threshold = 10
    def test_getStateIncludesProcesses(self):
        """
        The list of monitored processes must be included in the pickle state.
        """
        self.pm.addProcess("foo", ["arg1", "arg2"],
                           uid=1, gid=2, env={})
        self.assertEqual(self.pm.__getstate__()['processes'],
                         {'foo': (['arg1', 'arg2'], 1, 2, {})})
    def test_getStateExcludesReactor(self):
        """
        The private L{ProcessMonitor._reactor} instance variable should not be
        included in the pickle state.
        """
        self.assertNotIn('_reactor', self.pm.__getstate__())
    def test_addProcess(self):
        """
        L{ProcessMonitor.addProcess} only starts the named program if
        L{ProcessMonitor.startService} has been called.
        """
        self.pm.addProcess("foo", ["arg1", "arg2"],
                           uid=1, gid=2, env={})
        # not started yet: the process is registered but no protocol exists
        self.assertEqual(self.pm.protocols, {})
        self.assertEqual(self.pm.processes,
                         {"foo": (["arg1", "arg2"], 1, 2, {})})
        self.pm.startService()
        self.reactor.advance(0)
        self.assertEqual(self.pm.protocols.keys(), ["foo"])
    def test_addProcessDuplicateKeyError(self):
        """
        L{ProcessMonitor.addProcess} raises a C{KeyError} if a process with the
        given name already exists.
        """
        self.pm.addProcess("foo", ["arg1", "arg2"],
                           uid=1, gid=2, env={})
        self.assertRaises(KeyError, self.pm.addProcess,
                          "foo", ["arg1", "arg2"], uid=1, gid=2, env={})
    def test_addProcessEnv(self):
        """
        L{ProcessMonitor.addProcess} takes an C{env} parameter that is passed to
        L{IReactorProcess.spawnProcess}.
        """
        fakeEnv = {"KEY": "value"}
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"], uid=1, gid=2, env=fakeEnv)
        self.reactor.advance(0)
        # the fake reactor recorded the environment given to spawnProcess
        self.assertEqual(
            self.reactor.spawnedProcesses[0]._environment, fakeEnv)
    def test_removeProcess(self):
        """
        L{ProcessMonitor.removeProcess} removes the process from the public
        processes list.
        """
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        self.assertEqual(len(self.pm.processes), 1)
        self.pm.removeProcess("foo")
        self.assertEqual(len(self.pm.processes), 0)
    def test_removeProcessUnknownKeyError(self):
        """
        L{ProcessMonitor.removeProcess} raises a C{KeyError} if the given
        process name isn't recognised.
        """
        self.pm.startService()
        self.assertRaises(KeyError, self.pm.removeProcess, "foo")
    def test_startProcess(self):
        """
        When a process has been started, an instance of L{LoggingProtocol} will
        be added to the L{ProcessMonitor.protocols} dict and the start time of
        the process will be recorded in the L{ProcessMonitor.timeStarted}
        dictionary.
        """
        self.pm.addProcess("foo", ["foo"])
        self.pm.startProcess("foo")
        self.assertIsInstance(self.pm.protocols["foo"], LoggingProtocol)
        self.assertIn("foo", self.pm.timeStarted.keys())
    def test_startProcessAlreadyStarted(self):
        """
        L{ProcessMonitor.startProcess} silently returns if the named process is
        already started.
        """
        self.pm.addProcess("foo", ["foo"])
        self.pm.startProcess("foo")
        self.assertIdentical(None, self.pm.startProcess("foo"))
    def test_startProcessUnknownKeyError(self):
        """
        L{ProcessMonitor.startProcess} raises a C{KeyError} if the given
        process name isn't recognised.
        """
        self.assertRaises(KeyError, self.pm.startProcess, "foo")
    def test_stopProcessNaturalTermination(self):
        """
        L{ProcessMonitor.stopProcess} immediately sends a TERM signal to the
        named process.
        """
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        self.assertIn("foo", self.pm.protocols)

        # Configure fake process to die 1 second after receiving term signal
        timeToDie = self.pm.protocols["foo"].transport._terminationDelay = 1

        # Advance the reactor to just before the short lived process threshold
        # and leave enough time for the process to die
        self.reactor.advance(self.pm.threshold)
        # Then signal the process to stop
        self.pm.stopProcess("foo")

        # Advance the reactor just enough to give the process time to die and
        # verify that the process restarts
        self.reactor.advance(timeToDie)

        # We expect it to be restarted immediately
        self.assertEqual(self.reactor.seconds(),
                         self.pm.timeStarted["foo"])
    def test_stopProcessForcedKill(self):
        """
        L{ProcessMonitor.stopProcess} kills a process which fails to terminate
        naturally within L{ProcessMonitor.killTime} seconds.
        """
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        self.assertIn("foo", self.pm.protocols)
        self.reactor.advance(self.pm.threshold)
        proc = self.pm.protocols["foo"].transport
        # Arrange for the fake process to live longer than the killTime
        proc._terminationDelay = self.pm.killTime + 1
        self.pm.stopProcess("foo")
        # If process doesn't die before the killTime, procmon should
        # terminate it
        self.reactor.advance(self.pm.killTime - 1)
        self.assertEqual(0.0, self.pm.timeStarted["foo"])

        self.reactor.advance(1)
        # We expect it to be immediately restarted
        self.assertEqual(self.reactor.seconds(), self.pm.timeStarted["foo"])
    def test_stopProcessUnknownKeyError(self):
        """
        L{ProcessMonitor.stopProcess} raises a C{KeyError} if the given process
        name isn't recognised.
        """
        self.assertRaises(KeyError, self.pm.stopProcess, "foo")
    def test_stopProcessAlreadyStopped(self):
        """
        L{ProcessMonitor.stopProcess} silently returns if the named process
        is already stopped. eg Process has crashed and a restart has been
        rescheduled, but in the meantime, the service is stopped.
        """
        self.pm.addProcess("foo", ["foo"])
        # The service was never started, so "foo" is not running;
        # stopProcess must return None rather than raising.
        self.assertIdentical(None, self.pm.stopProcess("foo"))
    def test_connectionLostLongLivedProcess(self):
        """
        L{ProcessMonitor.connectionLost} should immediately restart a process
        if it has been running longer than L{ProcessMonitor.threshold} seconds.
        """
        self.pm.addProcess("foo", ["foo"])
        # Schedule the process to start
        self.pm.startService()
        # advance the reactor to start the process
        self.reactor.advance(0)
        self.assertIn("foo", self.pm.protocols)
        # Long time passes
        self.reactor.advance(self.pm.threshold)
        # Process dies after threshold
        self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
        self.assertNotIn("foo", self.pm.protocols)
        # Process should be restarted immediately (no backoff for a
        # long-lived process)
        self.reactor.advance(0)
        self.assertIn("foo", self.pm.protocols)
    def test_connectionLostMurderCancel(self):
        """
        L{ProcessMonitor.connectionLost} cancels a scheduled process killer and
        deletes the DelayedCall from the L{ProcessMonitor.murder} list.
        """
        self.pm.addProcess("foo", ["foo"])
        # Schedule the process to start
        self.pm.startService()
        # Advance 1s to start the process then ask ProcMon to stop it
        self.reactor.advance(1)
        self.pm.stopProcess("foo")
        # A process killer has been scheduled, delayedCall is active
        self.assertIn("foo", self.pm.murder)
        delayedCall = self.pm.murder["foo"]
        self.assertTrue(delayedCall.active())
        # Advance to the point at which the dummy process exits
        self.reactor.advance(
            self.pm.protocols["foo"].transport._terminationDelay)
        # Now the delayedCall has been cancelled and deleted, since the
        # process terminated on its own before the killer fired
        self.assertFalse(delayedCall.active())
        self.assertNotIn("foo", self.pm.murder)
    def test_connectionLostProtocolDeletion(self):
        """
        L{ProcessMonitor.connectionLost} removes the corresponding
        ProcessProtocol instance from the L{ProcessMonitor.protocols} list.
        """
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        self.assertIn("foo", self.pm.protocols)
        # Kill the fake process, then wait for its termination delay to
        # elapse so that connectionLost fires.
        self.pm.protocols["foo"].transport.signalProcess("KILL")
        self.reactor.advance(
            self.pm.protocols["foo"].transport._terminationDelay)
        self.assertNotIn("foo", self.pm.protocols)
    def test_connectionLostMinMaxRestartDelay(self):
        """
        L{ProcessMonitor.connectionLost} will wait at least minRestartDelay s
        and at most maxRestartDelay s
        """
        self.pm.minRestartDelay = 2
        self.pm.maxRestartDelay = 3
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        # The initial delay is the configured minimum.
        self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay)
        self.reactor.advance(self.pm.threshold - 1)
        self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
        # Process died within the threshold; the doubled delay (2*2=4)
        # is clamped to maxRestartDelay (3).
        self.assertEqual(self.pm.delay["foo"], self.pm.maxRestartDelay)
    def test_connectionLostBackoffDelayDoubles(self):
        """
        L{ProcessMonitor.connectionLost} doubles the restart delay each time
        the process dies too quickly.
        """
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        self.reactor.advance(self.pm.threshold - 1) #9s
        self.assertIn("foo", self.pm.protocols)
        self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay)
        # process dies within the threshold and should not restart immediately
        self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
        # Exponential backoff: delay doubles from the minimum.
        self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay * 2)
def test_startService(self):
"""
L{ProcessMonitor.startService} starts all monitored processes.
"""
self.pm.addProcess("foo", ["foo"])
# Schedule the process to start
self.pm.startService()
# advance the reactor to start the process
self.reactor.advance(0)
self.assertTrue("foo" in self.pm.protocols)
    def test_stopService(self):
        """
        L{ProcessMonitor.stopService} should stop all monitored processes.
        """
        self.pm.addProcess("foo", ["foo"])
        self.pm.addProcess("bar", ["bar"])
        # Schedule the process to start
        self.pm.startService()
        # advance the reactor to start the processes
        self.reactor.advance(self.pm.threshold)
        self.assertIn("foo", self.pm.protocols)
        self.assertIn("bar", self.pm.protocols)
        self.reactor.advance(1)
        self.pm.stopService()
        # Advance to beyond the killTime - all monitored processes
        # should have exited
        self.reactor.advance(self.pm.killTime + 1)
        # The processes shouldn't be restarted once the service is stopped
        self.assertEqual({}, self.pm.protocols)
    def test_stopServiceCancelRestarts(self):
        """
        L{ProcessMonitor.stopService} should cancel any scheduled process
        restarts.
        """
        self.pm.addProcess("foo", ["foo"])
        # Schedule the process to start
        self.pm.startService()
        # advance the reactor to start the processes
        self.reactor.advance(self.pm.threshold)
        self.assertIn("foo", self.pm.protocols)
        self.reactor.advance(1)
        # Kill the process early (before its restart delay has elapsed)
        self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
        self.assertTrue(self.pm.restart['foo'].active())
        self.pm.stopService()
        # Scheduled restart should have been cancelled
        self.assertFalse(self.pm.restart['foo'].active())
    def test_stopServiceCleanupScheduledRestarts(self):
        """
        L{ProcessMonitor.stopService} should cancel all scheduled process
        restarts.
        """
        self.pm.threshold = 5
        self.pm.minRestartDelay = 5
        # Start service and add a process (started immediately)
        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])
        # Stop the process after 1s
        self.reactor.advance(1)
        self.pm.stopProcess("foo")
        # Wait 1s for it to exit it will be scheduled to restart 5s later
        self.reactor.advance(1)
        # Meanwhile stop the service
        self.pm.stopService()
        # Advance to beyond the process restart time
        self.reactor.advance(6)
        # The process shouldn't have restarted because stopService has cancelled
        # all pending process restarts.
        self.assertEqual(self.pm.protocols, {})
| gpl-3.0 |
strint/tensorflow | tensorflow/python/user_ops/user_ops.py | 123 | 1138 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_user_ops as _gen_user_ops
# go/tf-wildcard-import
from tensorflow.python.ops.gen_user_ops import * # pylint: disable=wildcard-import
def my_fact():
  """Example of overriding the generated code for an Op.

  Returns:
    The output tensor of the generated `Fact` op.
  """
  # Bind the generated wrapper locally, then invoke it.
  fact_op = _gen_user_ops._fact  # pylint: disable=protected-access
  return fact_op()
| apache-2.0 |
ujjvala-addsol/addsol_hr | openerp/addons/mrp/product.py | 131 | 4440 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_template(osv.osv):
    """Extend product templates with BoM / manufacturing order counters."""
    _inherit = "product.template"

    def _bom_orders_count(self, cr, uid, ids, field_name, arg, context=None):
        """Count the bills of material defined for each template in ``ids``."""
        bom_obj = self.pool('mrp.bom')
        return dict(
            (tmpl_id,
             {'bom_count': bom_obj.search_count(
                 cr, uid, [('product_tmpl_id', '=', tmpl_id)],
                 context=context)})
            for tmpl_id in ids)

    def _bom_orders_count_mo(self, cr, uid, ids, name, arg, context=None):
        """Sum the manufacturing order counts over each template's variants."""
        res = {}
        for template in self.browse(cr, uid, ids):
            res[template.id] = sum(variant.mo_count
                                   for variant in template.product_variant_ids)
        return res

    _columns = {
        'bom_ids': fields.one2many('mrp.bom', 'product_tmpl_id','Bill of Materials'),
        'bom_count': fields.function(_bom_orders_count, string='# Bill of Material', type='integer', multi="_bom_order_count"),
        'mo_count': fields.function(_bom_orders_count_mo, string='# Manufacturing Orders', type='integer'),
        'produce_delay': fields.float('Manufacturing Lead Time', help="Average delay in days to produce this product. In the case of multi-level BOM, the manufacturing lead times of the components will be added."),
        'track_production': fields.boolean('Track Manufacturing Lots', help="Forces to specify a Serial Number for all moves containing this product and generated by a Manufacturing Order"),
    }
    _defaults = {
        'produce_delay': 1,
    }

    def action_view_mos(self, cr, uid, ids, context=None):
        """Return the action window listing manufacturing orders.

        With a single template/product selected the action defaults and
        filters on that product; otherwise it restricts the domain to the
        selected products.
        """
        products = self._get_products(cr, uid, ids, context=context)
        result = self._get_act_window_dict(cr, uid, 'mrp.act_product_mrp_production', context=context)
        if len(ids) == 1 and len(products) == 1:
            product_ref = str(products[0])
            result['context'] = ("{'default_product_id': " + product_ref +
                                 ", 'search_default_product_id': " +
                                 product_ref + "}")
        else:
            id_csv = ','.join(map(str, products))
            result['domain'] = "[('product_id','in',[" + id_csv + "])]"
            result['context'] = "{}"
        return result
class product_product(osv.osv):
    """Extend product variants with a manufacturing order counter."""
    _inherit = "product.product"

    def _bom_orders_count(self, cr, uid, ids, field_name, arg, context=None):
        """Count manufacturing orders for each product in ``ids``."""
        production_obj = self.pool('mrp.production')
        return dict(
            (product_id,
             production_obj.search_count(
                 cr, uid, [('product_id', '=', product_id)], context=context))
            for product_id in ids)

    _columns = {
        'mo_count': fields.function(_bom_orders_count, string='# Manufacturing Orders', type='integer'),
    }

    def action_view_bom(self, cr, uid, ids, context=None):
        """Return the action window listing BoMs that apply to ``ids``.

        Includes BoMs defined for the specific variants as well as BoMs
        defined on their templates with no variant set.
        """
        template_obj = self.pool.get("product.template")
        template_ids = set()
        for product in self.browse(cr, uid, ids, context=context):
            template_ids.add(product.product_tmpl_id.id)
        result = template_obj._get_act_window_dict(cr, uid, 'mrp.product_open_bom', context=context)
        # bom specific to this variant or global to template
        domain = [
            '|',
            ('product_id', 'in', ids),
            '&',
            ('product_id', '=', False),
            ('product_tmpl_id', 'in', list(template_ids)),
        ]
        result['context'] = "{}"
        result['domain'] = str(domain)
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
forslund/mycroft-core | mycroft/client/speech/listener.py | 2 | 16442 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from threading import Thread
import speech_recognition as sr
import pyaudio
from pyee import EventEmitter
from requests import RequestException
from requests.exceptions import ConnectionError
from mycroft import dialog
from mycroft.client.speech.hotword_factory import HotWordFactory
from mycroft.client.speech.mic import MutableMicrophone, ResponsiveRecognizer
from mycroft.configuration import Configuration
from mycroft.metrics import MetricsAggregator, Stopwatch, report_timing
from mycroft.session import SessionManager
from mycroft.stt import STTFactory
from mycroft.util import connected
from mycroft.util.log import LOG
from mycroft.util import find_input_device
from queue import Queue, Empty
import json
from copy import deepcopy
# Maximum number of microphone restart attempts after consecutive read errors.
MAX_MIC_RESTARTS = 20
# Tags for the messages passed on the producer -> consumer queue.
AUDIO_DATA = 0    # a complete recorded utterance (AudioData)
STREAM_START = 1  # beginning of a streamed STT utterance
STREAM_DATA = 2   # one chunk of streamed audio
STREAM_STOP = 3   # end of a streamed STT utterance
class AudioStreamHandler(object):
    """Forwards streaming-STT lifecycle events onto the shared audio queue."""

    def __init__(self, queue):
        # Queue shared with the AudioConsumer thread.
        self.queue = queue

    def _push(self, tag, payload):
        """Put a (tag, payload) message on the queue."""
        self.queue.put((tag, payload))

    def stream_start(self):
        self._push(STREAM_START, None)

    def stream_chunk(self, chunk):
        self._push(STREAM_DATA, chunk)

    def stream_stop(self):
        self._push(STREAM_STOP, None)
class AudioProducer(Thread):
    """AudioProducer
    Given a mic and a recognizer implementation, continuously listens to the
    mic for potential speech chunks and pushes them onto the queue.
    """
    def __init__(self, state, queue, mic, recognizer, emitter, stream_handler):
        super(AudioProducer, self).__init__()
        # Daemon thread: must not block interpreter shutdown.
        self.daemon = True
        self.state = state
        self.queue = queue
        self.mic = mic
        self.recognizer = recognizer
        self.emitter = emitter
        self.stream_handler = stream_handler
    def run(self):
        """Record speech chunks until state.running becomes False."""
        restart_attempts = 0
        with self.mic as source:
            self.recognizer.adjust_for_ambient_noise(source)
            while self.state.running:
                try:
                    audio = self.recognizer.listen(source, self.emitter,
                                                   self.stream_handler)
                    if audio is not None:
                        self.queue.put((AUDIO_DATA, audio))
                    else:
                        LOG.warning("Audio contains no data.")
                except IOError as e:
                    # IOError will be thrown if the read is unsuccessful.
                    # If self.recognizer.overflow_exc is False (default)
                    # input buffer overflow IOErrors due to not consuming the
                    # buffers quickly enough will be silently ignored.
                    LOG.exception('IOError Exception in AudioProducer')
                    if e.errno == pyaudio.paInputOverflowed:
                        pass  # Ignore overflow errors
                    elif restart_attempts < MAX_MIC_RESTARTS:
                        # restart the mic
                        restart_attempts += 1
                        LOG.info('Restarting the microphone...')
                        source.restart()
                        LOG.info('Restarted...')
                    else:
                        LOG.error('Restarting mic doesn\'t seem to work. '
                                  'Stopping...')
                        raise
                except Exception:
                    LOG.exception('Exception in AudioProducer')
                    raise
                else:
                    # Reset restart attempt counter on sucessful audio read
                    restart_attempts = 0
                finally:
                    # Always close out a streaming utterance, even on error.
                    if self.stream_handler is not None:
                        self.stream_handler.stream_stop()
    def stop(self):
        """Stop producer thread."""
        self.state.running = False
        self.recognizer.stop()
class AudioConsumer(Thread):
    """AudioConsumer
    Consumes AudioData chunks off the queue, either checking for the
    wakeup word (sleep mode) or transcribing speech and emitting the
    resulting utterance on the event emitter.
    """
    # In seconds, the minimum audio size to be sent to remote STT
    MIN_AUDIO_SIZE = 0.5

    def __init__(self, state, queue, emitter, stt,
                 wakeup_recognizer, wakeword_recognizer):
        super(AudioConsumer, self).__init__()
        self.daemon = True
        self.queue = queue
        self.state = state
        self.emitter = emitter
        self.stt = stt
        self.wakeup_recognizer = wakeup_recognizer
        self.wakeword_recognizer = wakeword_recognizer
        self.metrics = MetricsAggregator()

    def run(self):
        """Consume queue messages until the loop is stopped."""
        while self.state.running:
            self.read()

    def read(self):
        """Fetch one message from the queue and dispatch on its tag.

        A 0.5 s timeout lets the thread notice state.running changing
        and shut down promptly.
        """
        try:
            message = self.queue.get(timeout=0.5)
        except Empty:
            return
        if message is None:
            return
        tag, data = message
        if tag == AUDIO_DATA:
            if data is not None:
                if self.state.sleeping:
                    self.wake_up(data)
                else:
                    self.process(data)
        elif tag == STREAM_START:
            self.stt.stream_start()
        elif tag == STREAM_DATA:
            self.stt.stream_data(data)
        elif tag == STREAM_STOP:
            self.stt.stream_stop()
        else:
            LOG.error("Unknown audio queue type %r" % message)

    # TODO: Localization
    def wake_up(self, audio):
        """Leave sleep mode if the audio contains the stand-up word."""
        if self.wakeup_recognizer.found_wake_word(audio.frame_data):
            SessionManager.touch()
            self.state.sleeping = False
            self.emitter.emit('recognizer_loop:awoken')
            self.metrics.increment("mycroft.wakeup")

    @staticmethod
    def _audio_length(audio):
        """Return the duration of the audio clip in seconds."""
        return float(len(audio.frame_data)) / (
            audio.sample_rate * audio.sample_width)

    # TODO: Localization
    def process(self, audio):
        """Transcribe audio and emit the utterance for intent handling."""
        if self._audio_length(audio) >= self.MIN_AUDIO_SIZE:
            stopwatch = Stopwatch()
            with stopwatch:
                transcription = self.transcribe(audio)
            if transcription:
                ident = str(stopwatch.timestamp) + str(hash(transcription))
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang,
                    'session': SessionManager.get().session_id,
                    'ident': ident
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
                self.metrics.attr('utterances', [transcription])
                # Report timing metrics
                report_timing(ident, 'stt', stopwatch,
                              {'transcription': transcription,
                               'stt': self.stt.__class__.__name__})
            else:
                ident = str(stopwatch.timestamp)
        else:
            LOG.warning("Audio too short to be processed")

    def transcribe(self, audio):
        """Run STT on the audio clip.

        Returns:
            The lowercased transcription string, or None if transcription
            failed or produced no words.
        """
        def send_unknown_intent():
            """ Send message that nothing was transcribed. """
            self.emitter.emit('recognizer_loop:speech.recognition.unknown')
        try:
            # Invoke the STT engine on the audio clip
            text = self.stt.execute(audio)
            if text is not None:
                text = text.lower().strip()
                LOG.debug("STT: " + text)
            else:
                send_unknown_intent()
                LOG.info('no words were transcribed')
            return text
        except sr.RequestError as e:
            LOG.error("Could not request Speech Recognition {0}".format(e))
        except ConnectionError as e:
            LOG.error("Connection Error: {0}".format(e))
            self.emitter.emit("recognizer_loop:no_internet")
        except RequestException as e:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
        except Exception as e:
            send_unknown_intent()
            LOG.error(e)
            LOG.error("Speech Recognition could not understand audio")
        # NOTE(review): the original contained an unreachable block after
        # this return (speaking a 'backend.down' dialog depending on
        # connectivity); removed as dead code.  If the user should be told
        # about backend/network failures, emit the 'speak' event from the
        # relevant except clause above instead.
        return None

    def __speak(self, utterance):
        """Emit a 'speak' event for utterance (helper, currently unused)."""
        payload = {
            'utterance': utterance,
            'session': SessionManager.get().session_id
        }
        self.emitter.emit("speak", payload)
class RecognizerLoopState:
    """Mutable flags shared between the loop and its worker threads."""

    def __init__(self):
        # running: worker threads keep going while True.
        # sleeping: loop only listens for the wakeup word while True.
        self.running = self.sleeping = False
def recognizer_conf_hash(config):
    """Hash of the values important to the listener.

    Only the listener, hotword, STT and opt-in settings contribute, so
    unrelated config changes don't trigger a listener reload.
    """
    relevant = {key: config.get(key)
                for key in ('listener', 'hotwords', 'stt')}
    relevant['opt_in'] = config.get('opt_in', False)
    return hash(json.dumps(relevant, sort_keys=True))
class RecognizerLoop(EventEmitter):
    """ EventEmitter loop running speech recognition.
    Local wake word recognizer and remote general speech recognition.
    Args:
        watchdog: (callable) function to call periodically indicating
                  operational status.
    """
    def __init__(self, watchdog=None):
        super(RecognizerLoop, self).__init__()
        self._watchdog = watchdog
        # Number of outstanding mute requests (see mute()/unmute()).
        self.mute_calls = 0
        self._load_config()
    def _load_config(self):
        """Load configuration parameters from configuration."""
        config = Configuration.get()
        self.config_core = config
        # Hash used by run() to detect external configuration changes.
        self._config_hash = recognizer_conf_hash(config)
        self.lang = config.get('lang')
        self.config = config.get('listener')
        rate = self.config.get('sample_rate')
        device_index = self.config.get('device_index')
        device_name = self.config.get('device_name')
        if not device_index and device_name:
            device_index = find_input_device(device_name)
        LOG.debug('Using microphone (None = default): '+str(device_index))
        self.microphone = MutableMicrophone(device_index, rate,
                                            mute=self.mute_calls > 0)
        self.wakeword_recognizer = self.create_wake_word_recognizer()
        # TODO - localization
        self.wakeup_recognizer = self.create_wakeup_recognizer()
        self.responsive_recognizer = ResponsiveRecognizer(
            self.wakeword_recognizer, self._watchdog)
        self.state = RecognizerLoopState()
    def create_wake_word_recognizer(self):
        """Create a local recognizer to hear the wakeup word
        For example 'Hey Mycroft'.
        The method uses the hotword entry for the selected wakeword, if
        one is missing it will fall back to the old phoneme and threshold in
        the listener entry in the config.
        If the hotword entry doesn't include phoneme and threshold values these
        will be patched in using the defaults from the config listnere entry.
        """
        LOG.info('Creating wake word engine')
        word = self.config.get('wake_word', 'hey mycroft')
        # TODO remove this, only for server settings compatibility
        phonemes = self.config.get('phonemes')
        thresh = self.config.get('threshold')
        # Since we're editing it for server backwards compatibility
        # use a copy so we don't alter the hash of the config and
        # trigger a reload.
        config = deepcopy(self.config_core.get('hotwords', {}))
        if word not in config:
            # Fallback to using config from "listener" block
            LOG.warning('Wakeword doesn\'t have an entry falling back'
                        'to old listener config')
            config[word] = {'module': 'precise'}
            if phonemes:
                config[word]['phonemes'] = phonemes
            if thresh:
                config[word]['threshold'] = thresh
            if phonemes is None or thresh is None:
                config = None
        else:
            LOG.info('Using hotword entry for {}'.format(word))
            if 'phonemes' not in config[word]:
                LOG.warning('Phonemes are missing falling back to listeners '
                            'configuration')
                config[word]['phonemes'] = phonemes
            if 'threshold' not in config[word]:
                LOG.warning('Threshold is missing falling back to listeners '
                            'configuration')
                config[word]['threshold'] = thresh
        return HotWordFactory.create_hotword(word, config, self.lang,
                                             loop=self)
    def create_wakeup_recognizer(self):
        """Create the recognizer for the stand-up word used in sleep mode."""
        LOG.info("creating stand up word engine")
        word = self.config.get("stand_up_word", "wake up")
        return HotWordFactory.create_hotword(word, lang=self.lang, loop=self)
    def start_async(self):
        """Start consumer and producer threads."""
        self.state.running = True
        stt = STTFactory.create()
        queue = Queue()
        stream_handler = None
        if stt.can_stream:
            stream_handler = AudioStreamHandler(queue)
        self.producer = AudioProducer(self.state, queue, self.microphone,
                                      self.responsive_recognizer, self,
                                      stream_handler)
        self.producer.start()
        self.consumer = AudioConsumer(self.state, queue, self,
                                      stt, self.wakeup_recognizer,
                                      self.wakeword_recognizer)
        self.consumer.start()
    def stop(self):
        """Stop the producer and consumer threads and wait for them to exit."""
        self.state.running = False
        self.producer.stop()
        # wait for threads to shutdown
        self.producer.join()
        self.consumer.join()
    def mute(self):
        """Mute microphone and increase number of requests to mute."""
        self.mute_calls += 1
        if self.microphone:
            self.microphone.mute()
    def unmute(self):
        """Unmute mic if as many unmute calls as mute calls have been received.
        """
        if self.mute_calls > 0:
            self.mute_calls -= 1
        if self.mute_calls <= 0 and self.microphone:
            self.microphone.unmute()
            self.mute_calls = 0
    def force_unmute(self):
        """Completely unmute mic regardless of the number of calls to mute."""
        self.mute_calls = 0
        self.unmute()
    def is_muted(self):
        """Return whether the microphone is currently muted."""
        if self.microphone:
            return self.microphone.is_muted()
        else:
            return True  # consider 'no mic' muted
    def sleep(self):
        """Put the loop to sleep: only listen for the stand-up word."""
        self.state.sleeping = True
    def awaken(self):
        """Wake the loop up from sleep mode."""
        self.state.sleeping = False
    def run(self):
        """Start and reload mic and STT handling threads as needed.
        Wait for KeyboardInterrupt and shutdown cleanly.
        """
        try:
            self.start_async()
        except Exception:
            LOG.exception('Starting producer/consumer threads for listener '
                          'failed.')
            return
        # Handle reload of consumer / producer if config changes
        while self.state.running:
            try:
                time.sleep(1)
                current_hash = recognizer_conf_hash(Configuration().get())
                if current_hash != self._config_hash:
                    self._config_hash = current_hash
                    LOG.debug('Config has changed, reloading...')
                    self.reload()
            except KeyboardInterrupt as e:
                LOG.error(e)
                self.stop()
                raise  # Re-raise KeyboardInterrupt
            except Exception:
                LOG.exception('Exception in RecognizerLoop')
                raise
    def reload(self):
        """Reload configuration and restart consumer and producer."""
        self.stop()
        self.wakeword_recognizer.stop()
        # load config
        self._load_config()
        # restart
        self.start_async()
| apache-2.0 |
rahlk/CSC579__Computer_Performance_Modeling | simulation/proj2/Utils/SimUtil.py | 1 | 5781 | from __future__ import print_function
from __future__ import division
from RndUtil import Random
from pdb import set_trace
from MscUtil import Params, Customer
# Module-wide RNG with a fixed seed so simulation runs are reproducible.
rand = Random()
rand.set_seed(seed_val=1729)
"""
Why 1729?
Well, it is a very interesting number.
It is the smallest number expressible as the sum of two positive cubes in
two different ways.
"""
class Simulation:
    """Discrete event simulation of a finite-capacity queueing system.

    Supports single-queue disciplines (FCFS, LCFS, SJF) plus the
    "PrioNP"/"PrioP" service types, which distribute arrivals uniformly
    over four sub-queues of capacity K/4 each.
    """

    def __init__(self, params=Params):
        """Initialize simulation state and schedule the first arrival."""
        self.queue = list()
        self.priority_queue = {"1": list(), "2": list(), "3": list(),
                               "4": list()}
        self.params = params
        self.customers = list()       # departed or rejected customers
        self.t_depart = float('inf')  # time of the next departure event
        self.num_in_system = len(self.queue)
        self.t_arrive = self.generate_interarrival()  # next arrival time
        self.clock = 0
        self.num_arrive = 0
        self.num_depart = 0
        self.num_reject = 0
        self.num_serviced = 0
        # NOTE(review): evaluated once here (always 0, no customers have
        # completed yet); recompute after run_simulation() if the aggregate
        # wait time is needed.
        self.total_wait = sum([c.get_wait_time() for c in self.customers])

    @staticmethod
    def random_queue():
        """Return the id ("1".."4") of a uniformly chosen sub-queue.

        Bugfix: the original used open intervals (``0 < r < 0.25`` etc.)
        and returned None whenever the uniform draw hit 0 or an interval
        boundary, later raising a KeyError on the priority_queue lookup.
        Half-open intervals now cover [0, 1) completely.
        """
        random = rand.uniform()
        if random < 0.25:
            return "1"
        elif random < 0.5:
            return "2"
        elif random < 0.75:
            return "3"
        return "4"

    @staticmethod
    def all_empty(queue):
        """Return True if every sub-queue in ``queue`` (dict of lists) is empty."""
        sizes = [len(q) for _, q in queue.iteritems()]
        return sum(sizes) == 0

    def generate_interarrival(self):
        """Draw an exponential interarrival time (rate ``params.rho``)."""
        return rand.exponential(self.params.rho)

    def generate_service_time(self):
        """Draw an exponential service time (rate ``params.lmbd``)."""
        return rand.exponential(self.params.lmbd)

    def prioritize(self):
        """Assign service priorities to the queued customers.

        FCFS: priority equals queue position; LCFS: reversed; SJF: sorted
        by service time (lowest priority value is served first).
        """
        if self.params.service_type == "FCFS":
            for k, _ in enumerate(self.queue):
                c = self.queue[k]
                c.priority = k
                self.queue[k] = c
            return self.queue
        elif self.params.service_type == "LCFS":
            for k, _ in enumerate(self.queue):
                c = self.queue[k]
                c.priority = len(self.queue) - k
                self.queue[k] = c
            return self.queue
        elif self.params.service_type == "SJF":
            sorted_args = sorted(self.queue, key=lambda X: X.service_time)

            def find(n):
                # Locate customer n's position in the (unsorted) queue.
                for i, c in enumerate(self.queue):
                    if c == n:
                        return i
            for k, n in enumerate(sorted_args):
                c = self.queue[find(n)]
                c.priority = k
                self.queue[find(n)] = c
            return self.queue

    def handle_arrive_event(self):
        """Process an arrival: enqueue the customer or reject if full."""
        self.num_arrive += 1  # Increment arrival
        c = Customer(id=self.num_arrive)
        self.t_arrive = self.clock + self.generate_interarrival()
        c.arrival_time = self.t_arrive
        # If the queue is full kick out the customer
        if len(self.queue) >= self.params.K:
            c.depart_time = c.arrival_time
            self.t_depart = c.depart_time
            self.num_reject += 1
            c.serviced = False
            self.customers.append(c)
        # Else add to queue
        else:
            c.serviced = True
            c.service_time = self.generate_service_time()
            self.queue.append(c)
            self.queue = self.prioritize()

    def handle_depart_event(self):
        """Process a departure: serve the highest-priority customer."""
        self.num_depart += 1
        if len(self.queue) > 0:
            self.num_serviced += 1
            sorted_queue = sorted(self.queue, key=lambda C: C.priority)
            c = sorted_queue.pop(0)
            c.depart_time = self.clock + c.service_time
            self.t_depart = c.depart_time
            self.customers.append(c)
            self.queue = [c for c in self.queue if c in sorted_queue]
        else:
            # Empty queue: no departure scheduled.
            self.t_depart = float("inf")

    def handle_multiqueue_arrive(self):
        """Arrival for "PrioNP"/"PrioP": route to a random sub-queue."""
        self.num_arrive += 1  # Increment arrival
        c = Customer(id=self.num_arrive)
        self.t_arrive = self.clock + self.generate_interarrival()
        c.arrival_time = self.t_arrive
        q_id = self.random_queue()
        # If the sub-queue is full kick out the customer
        if len(self.priority_queue[q_id]) >= self.params.K / 4:
            c.depart_time = c.arrival_time
            self.t_depart = c.depart_time
            self.num_reject += 1
            c.serviced = False
            self.customers.append(c)
        # Else add to queue
        else:
            c.serviced = True
            c.service_time = self.generate_service_time()
            c.q_id = q_id
            self.priority_queue[q_id].append(c)

    def handle_multiqueue_depart(self):
        """Departure for "PrioNP"/"PrioP": serve the first non-empty sub-queue."""
        self.num_depart += 1
        for q_id, queue in self.priority_queue.iteritems():
            if len(queue) > 0:
                self.num_serviced += 1
                c = queue.pop(0)
                c.depart_time = self.clock + c.service_time
                self.t_depart = c.depart_time
                self.customers.append(c)
                break
        if self.all_empty(self.priority_queue):
            self.t_depart = float("inf")

    def advance_time(self):
        """Advance the clock to the next event and dispatch it."""
        self.clock = min(self.t_arrive, self.t_depart)
        if self.t_arrive < self.t_depart:
            if self.params.service_type == "PrioNP" \
                    or self.params.service_type == "PrioP":
                self.handle_multiqueue_arrive()
            else:
                self.handle_arrive_event()
        else:
            if self.params.service_type == "PrioNP" \
                    or self.params.service_type == "PrioP":
                self.handle_multiqueue_depart()
            else:
                self.handle_depart_event()

    def run_simulation(self):
        """Run until ``params.C`` departures have occurred; return self."""
        while self.num_depart < self.params.C:
            self.advance_time()
        return self
| mit |
pitrou/numba | numba/tests/test_closure.py | 4 | 2890 | from __future__ import print_function
import sys
import numba.unittest_support as unittest
from numba import jit, testing
from .support import TestCase
class TestClosure(TestCase):
    """Tests that @jit captures closure variables at compile time."""
    def run_jit_closure_variable(self, **jitargs):
        Y = 10
        def add_Y(x):
            return x + Y
        c_add_Y = jit('i4(i4)', **jitargs)(add_Y)
        self.assertEqual(c_add_Y(1), 11)
        # Like globals in Numba, the value of the closure is captured
        # at time of JIT
        Y = 12 # should not affect function
        self.assertEqual(c_add_Y(1), 11)
    def test_jit_closure_variable(self):
        self.run_jit_closure_variable(forceobj=True)
    def test_jit_closure_variable_npm(self):
        self.run_jit_closure_variable(nopython=True)
    def run_rejitting_closure(self, **jitargs):
        Y = 10
        def add_Y(x):
            return x + Y
        c_add_Y = jit('i4(i4)', **jitargs)(add_Y)
        self.assertEqual(c_add_Y(1), 11)
        # Redo the jit: the new closure value is captured by the new dispatcher
        Y = 12
        c_add_Y_2 = jit('i4(i4)', **jitargs)(add_Y)
        self.assertEqual(c_add_Y_2(1), 13)
        Y = 13 # should not affect function
        self.assertEqual(c_add_Y_2(1), 13)
        self.assertEqual(c_add_Y(1), 11) # Test first function again
    def test_rejitting_closure(self):
        self.run_rejitting_closure(forceobj=True)
    def test_rejitting_closure_npm(self):
        self.run_rejitting_closure(nopython=True)
    def run_jit_multiple_closure_variables(self, **jitargs):
        Y = 10
        Z = 2
        def add_Y_mult_Z(x):
            return (x + Y) * Z
        c_add_Y_mult_Z = jit('i4(i4)', **jitargs)(add_Y_mult_Z)
        self.assertEqual(c_add_Y_mult_Z(1), 22)
    def test_jit_multiple_closure_variables(self):
        self.run_jit_multiple_closure_variables(forceobj=True)
    def test_jit_multiple_closure_variables_npm(self):
        self.run_jit_multiple_closure_variables(nopython=True)
    def run_jit_inner_function(self, **jitargs):
        def mult_10(a):
            return a * 10
        c_mult_10 = jit('intp(intp)', **jitargs)(mult_10)
        c_mult_10.disable_compile()
        def do_math(x):
            # Calls the already-compiled inner dispatcher.
            return c_mult_10(x + 4)
        c_do_math = jit('intp(intp)', **jitargs)(do_math)
        c_do_math.disable_compile()
        # Check that calling doesn't leak references to the dispatchers.
        with self.assertRefCount(c_do_math, c_mult_10):
            self.assertEqual(c_do_math(1), 50)
    def test_jit_inner_function(self):
        self.run_jit_inner_function(forceobj=True)
    def test_jit_inner_function_npm(self):
        self.run_jit_inner_function(nopython=True)
    @testing.allow_interpreter_mode
    def test_return_closure(self):
        def outer(x):
            def inner():
                return x + 1
            return inner
        cfunc = jit(outer)
        self.assertEqual(cfunc(10)(), outer(10)())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
wkschwartz/django | tests/one_to_one/tests.py | 19 | 22386 | from django.db import IntegrityError, connection, transaction
from django.test import TestCase
from .models import (
Bar, Director, Favorites, HiddenPointer, ManualPrimaryKey, MultiModel,
Place, Pointer, RelatedModel, Restaurant, School, Target, ToFieldPointer,
UndergroundBar, Waiter,
)
class OneToOneTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Shared fixture: two places; a restaurant and a bar both attached
        # to p1, while p2 intentionally has no related restaurant.
        cls.p1 = Place.objects.create(name='Demon Dogs', address='944 W. Fullerton')
        cls.p2 = Place.objects.create(name='Ace Hardware', address='1013 N. Ashland')
        cls.r1 = Restaurant.objects.create(place=cls.p1, serves_hot_dogs=True, serves_pizza=False)
        cls.b1 = Bar.objects.create(place=cls.p1, serves_cocktails=False)
    def test_getter(self):
        """Attribute access works in both directions of the one-to-one."""
        # A Restaurant can access its place.
        self.assertEqual(repr(self.r1.place), '<Place: Demon Dogs the place>')
        # A Place can access its restaurant, if available.
        self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
        # p2 doesn't have an associated restaurant.
        with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
            self.p2.restaurant
        # The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of `AttributeError`
        # refs #21563
        self.assertFalse(hasattr(self.p2, 'restaurant'))
    def test_setter(self):
        """Assignment works in both directions of the one-to-one."""
        # Set the place using assignment notation. Because place is the primary
        # key on Restaurant, the save will create a new restaurant
        self.r1.place = self.p2
        self.r1.save()
        self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
        self.assertEqual(repr(self.r1.place), '<Place: Ace Hardware the place>')
        self.assertEqual(self.p2.pk, self.r1.pk)
        # Set the place back again, using assignment in the reverse direction.
        self.p1.restaurant = self.r1
        self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
        r = Restaurant.objects.get(pk=self.p1.id)
        self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertSequenceEqual(Restaurant.objects.all(), [self.r1])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertSequenceEqual(Place.objects.order_by('name'), [self.p2, self.p1])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r1)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r1)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r1.waiter_set.create(name='Joe')
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertSequenceEqual(Waiter.objects.filter(**params), [w])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.r1.pk)
assert_filter_waiters(restaurant__exact=self.r1)
assert_filter_waiters(restaurant__pk=self.r1.pk)
assert_filter_waiters(restaurant=self.r1.pk)
assert_filter_waiters(restaurant=self.r1)
assert_filter_waiters(id__exact=w.pk)
assert_filter_waiters(pk=w.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.r1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
with self.assertRaises(IntegrityError):
with transaction.atomic():
mm.save()
def test_unsaved_object(self):
"""
#10811 -- Assigning an unsaved object to a OneToOneField
should raise an exception.
"""
place = Place(name='User', address='London')
with self.assertRaises(Restaurant.DoesNotExist):
place.restaurant
msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
with self.assertRaisesMessage(ValueError, msg):
Restaurant.objects.create(place=place, serves_hot_dogs=True, serves_pizza=False)
# place should not cache restaurant
with self.assertRaises(Restaurant.DoesNotExist):
place.restaurant
def test_reverse_relationship_cache_cascade(self):
"""
Regression test for #9023: accessing the reverse relationship shouldn't
result in a cascading delete().
"""
bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
# The bug in #9023: if you access the one-to-one relation *before*
# setting to None and deleting, the cascade happens anyway.
self.p1.undergroundbar
bar.place.name = 'foo'
bar.place = None
bar.save()
self.p1.delete()
self.assertEqual(Place.objects.all().count(), 1)
self.assertEqual(UndergroundBar.objects.all().count(), 1)
def test_create_models_m2m(self):
"""
Models are created via the m2m relation if the remote model has a
OneToOneField (#1064, #1506).
"""
f = Favorites(name='Fred')
f.save()
f.restaurants.set([self.r1])
self.assertSequenceEqual(f.restaurants.all(), [self.r1])
def test_reverse_object_cache(self):
"""
The name of the cache for the reverse object is correct (#7173).
"""
self.assertEqual(self.p1.restaurant, self.r1)
self.assertEqual(self.p1.bar, self.b1)
def test_assign_none_reverse_relation(self):
p = Place.objects.get(name="Demon Dogs")
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
p.undergroundbar = None
self.assertIsNone(ug_bar.place)
ug_bar.save()
ug_bar.refresh_from_db()
self.assertIsNone(ug_bar.place)
def test_assign_none_null_reverse_relation(self):
p = Place.objects.get(name="Demon Dogs")
# Assigning None doesn't throw AttributeError if there isn't a related
# UndergroundBar.
p.undergroundbar = None
def test_assign_none_to_null_cached_reverse_relation(self):
p = Place.objects.get(name='Demon Dogs')
# Prime the relation's cache with a value of None.
with self.assertRaises(Place.undergroundbar.RelatedObjectDoesNotExist):
getattr(p, 'undergroundbar')
# Assigning None works if there isn't a related UndergroundBar and the
# reverse cache has a value of None.
p.undergroundbar = None
def test_assign_o2o_id_value(self):
b = UndergroundBar.objects.create(place=self.p1)
b.place_id = self.p2.pk
b.save()
self.assertEqual(b.place_id, self.p2.pk)
self.assertFalse(UndergroundBar.place.is_cached(b))
self.assertEqual(b.place, self.p2)
self.assertTrue(UndergroundBar.place.is_cached(b))
# Reassigning the same value doesn't clear a cached instance.
b.place_id = self.p2.pk
self.assertTrue(UndergroundBar.place.is_cached(b))
def test_assign_o2o_id_none(self):
b = UndergroundBar.objects.create(place=self.p1)
b.place_id = None
b.save()
self.assertIsNone(b.place_id)
self.assertFalse(UndergroundBar.place.is_cached(b))
self.assertIsNone(b.place)
self.assertTrue(UndergroundBar.place.is_cached(b))
def test_related_object_cache(self):
""" Regression test for #6886 (the related-object cache) """
# Look up the objects again so that we get "fresh" objects
p = Place.objects.get(name="Demon Dogs")
r = p.restaurant
# Accessing the related object again returns the exactly same object
self.assertIs(p.restaurant, r)
# But if we kill the cache, we get a new object
del p._state.fields_cache['restaurant']
self.assertIsNot(p.restaurant, r)
# Reassigning the Restaurant object results in an immediate cache update
# We can't use a new Restaurant because that'll violate one-to-one, but
# with a new *instance* the is test below will fail if #6886 regresses.
r2 = Restaurant.objects.get(pk=r.pk)
p.restaurant = r2
self.assertIs(p.restaurant, r2)
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
ug_bar.place = None
self.assertIsNone(ug_bar.place)
# Assigning None will not fail: Place.restaurant is null=False
setattr(p, 'restaurant', None)
# You also can't assign an object of the wrong type here
msg = (
'Cannot assign "<Place: Demon Dogs the place>": '
'"Place.restaurant" must be a "Restaurant" instance.'
)
with self.assertRaisesMessage(ValueError, msg):
setattr(p, 'restaurant', p)
# Creation using keyword argument should cache the related object.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place=p)
self.assertIs(r.place, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Place()
r = Restaurant(place=p)
self.assertIs(r.place, p)
# Creation using attname keyword argument and an id will cause the related
# object to be fetched.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place_id=p.id)
self.assertIsNot(r.place, p)
self.assertEqual(r.place, p)
def test_filter_one_to_one_relations(self):
"""
Regression test for #9968
filtering reverse one-to-one relations with primary_key=True was
misbehaving. We test both (primary_key=True & False) cases here to
prevent any reappearance of the problem.
"""
target = Target.objects.create()
self.assertSequenceEqual(Target.objects.filter(pointer=None), [target])
self.assertSequenceEqual(Target.objects.exclude(pointer=None), [])
self.assertSequenceEqual(Target.objects.filter(second_pointer=None), [target])
self.assertSequenceEqual(Target.objects.exclude(second_pointer=None), [])
def test_o2o_primary_key_delete(self):
t = Target.objects.create(name='name')
Pointer.objects.create(other=t)
num_deleted, objs = Pointer.objects.filter(other__name='name').delete()
self.assertEqual(num_deleted, 1)
self.assertEqual(objs, {'one_to_one.Pointer': 1})
def test_save_nullable_o2o_after_parent(self):
place = Place(name='Rose tattoo')
bar = UndergroundBar(place=place)
place.save()
bar.save()
bar.refresh_from_db()
self.assertEqual(bar.place, place)
def test_reverse_object_does_not_exist_cache(self):
"""
Regression for #13839 and #17439.
DoesNotExist on a reverse one-to-one relation is cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
with self.assertNumQueries(1):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
with self.assertNumQueries(0):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
def test_reverse_object_cached_when_related_is_accessed(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is cached
when the origin is accessed through the reverse relation.
"""
# Use a fresh object without caches
r = Restaurant.objects.get(pk=self.r1.pk)
p = r.place
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, r)
def test_related_object_cached_when_reverse_is_accessed(self):
"""
Regression for #13839 and #17439.
The origin of a one-to-one relation is cached
when the target is accessed through the reverse relation.
"""
# Use a fresh object without caches
p = Place.objects.get(pk=self.p1.pk)
r = p.restaurant
with self.assertNumQueries(0):
self.assertEqual(r.place, p)
def test_reverse_object_cached_when_related_is_set(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
self.r1.place = p
self.r1.save()
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, self.r1)
def test_reverse_object_cached_when_related_is_unset(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
b = UndergroundBar(place=self.p1, serves_cocktails=True)
b.save()
with self.assertNumQueries(0):
self.assertEqual(self.p1.undergroundbar, b)
b.place = None
b.save()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
self.p1.undergroundbar
def test_get_reverse_on_unsaved_object(self):
"""
Regression for #18153 and #19089.
Accessing the reverse relation on an unsaved object
always raises an exception.
"""
p = Place()
# When there's no instance of the origin of the one-to-one
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there's one instance of the origin
# (p.undergroundbar used to return that instance)
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
# Several instances of the origin are only possible if database allows
# inserting multiple NULL rows for a unique constraint
if connection.features.supports_nullable_unique_constraints:
UndergroundBar.objects.create()
# When there are several instances of the origin
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
def test_set_reverse_on_unsaved_object(self):
"""
Writing to the reverse relation on an unsaved object
is impossible too.
"""
p = Place()
b = UndergroundBar.objects.create()
# Assigning a reverse relation on an unsaved object is allowed.
p.undergroundbar = b
# However saving the object is not allowed.
msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
with self.assertNumQueries(0):
with self.assertRaisesMessage(ValueError, msg):
b.save()
def test_nullable_o2o_delete(self):
u = UndergroundBar.objects.create(place=self.p1)
u.place_id = None
u.save()
self.p1.delete()
self.assertTrue(UndergroundBar.objects.filter(pk=u.pk).exists())
self.assertIsNone(UndergroundBar.objects.get(pk=u.pk).place)
def test_hidden_accessor(self):
"""
When a '+' ending related name is specified no reverse accessor should
be added to the related model.
"""
self.assertFalse(
hasattr(Target, HiddenPointer._meta.get_field('target').remote_field.get_accessor_name())
)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_director = Director.objects.create(school=public_school, is_temp=False)
private_school = School.objects.create(is_public=False)
private_director = Director.objects.create(school=private_school, is_temp=True)
# Only one school is available via all() due to the custom default manager.
self.assertSequenceEqual(School.objects.all(), [public_school])
# Only one director is available via all() due to the custom default manager.
self.assertSequenceEqual(Director.objects.all(), [public_director])
self.assertEqual(public_director.school, public_school)
self.assertEqual(public_school.director, public_director)
# Make sure the base manager is used so that the related objects
# is still accessible even if the default manager doesn't normally
# allow it.
self.assertEqual(private_director.school, private_school)
# Make sure the base manager is used so that an student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_school.director, private_director)
School._meta.base_manager_name = 'objects'
School._meta._expire_cache()
try:
private_director = Director._base_manager.get(pk=private_director.pk)
with self.assertRaises(School.DoesNotExist):
private_director.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
Director._meta.base_manager_name = 'objects'
Director._meta._expire_cache()
try:
private_school = School._base_manager.get(pk=private_school.pk)
with self.assertRaises(Director.DoesNotExist):
private_school.director
finally:
Director._meta.base_manager_name = None
Director._meta._expire_cache()
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Director(), 'director'))
self.assertFalse(hasattr(School(), 'school'))
def test_update_one_to_one_pk(self):
p1 = Place.objects.create()
p2 = Place.objects.create()
r1 = Restaurant.objects.create(place=p1)
r2 = Restaurant.objects.create(place=p2)
w = Waiter.objects.create(restaurant=r1)
Waiter.objects.update(restaurant=r2)
w.refresh_from_db()
self.assertEqual(w.restaurant, r2)
def test_rel_pk_subquery(self):
r = Restaurant.objects.first()
q1 = Restaurant.objects.filter(place_id=r.pk)
# Subquery using primary key and a query against the
# same model works correctly.
q2 = Restaurant.objects.filter(place_id__in=q1)
self.assertSequenceEqual(q2, [r])
# Subquery using 'pk__in' instead of 'place_id__in' work, too.
q2 = Restaurant.objects.filter(
pk__in=Restaurant.objects.filter(place__id=r.place.pk)
)
self.assertSequenceEqual(q2, [r])
q3 = Restaurant.objects.filter(place__in=Place.objects.all())
self.assertSequenceEqual(q3, [r])
q4 = Restaurant.objects.filter(place__in=Place.objects.filter(id=r.pk))
self.assertSequenceEqual(q4, [r])
def test_rel_pk_exact(self):
r = Restaurant.objects.first()
r2 = Restaurant.objects.filter(pk__exact=r).first()
self.assertEqual(r, r2)
def test_primary_key_to_field_filter(self):
target = Target.objects.create(name='foo')
pointer = ToFieldPointer.objects.create(target=target)
self.assertSequenceEqual(ToFieldPointer.objects.filter(target=target), [pointer])
self.assertSequenceEqual(ToFieldPointer.objects.filter(pk__exact=pointer), [pointer])
def test_cached_relation_invalidated_on_save(self):
"""
Model.save() invalidates stale OneToOneField relations after a primary
key assignment.
"""
self.assertEqual(self.b1.place, self.p1) # caches b1.place
self.b1.place_id = self.p2.pk
self.b1.save()
self.assertEqual(self.b1.place, self.p2)
| bsd-3-clause |
flwh/KK_mt6589_iq451 | prebuilts/python/darwin-x86/2.7.5/lib/python2.7/test/test_copy_reg.py | 129 | 4256 | import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
class C:
    """Classic (old-style, Python 2) class; copy_reg.pickle() must reject it."""
    pass
class WithoutSlots(object):
    """New-style class with no __slots__; _slotnames() should return []."""
    pass
class WithWeakref(object):
    """Only a __weakref__ slot, which _slotnames() must ignore."""
    __slots__ = ('__weakref__',)
class WithPrivate(object):
    """Private slot name; _slotnames() should report the mangled form."""
    __slots__ = ('__spam',)
class WithSingleString(object):
    """__slots__ given as a bare string rather than a sequence."""
    __slots__ = 'spam'
class WithInherited(WithSingleString):
    """Slots inherited from a base class plus its own; both must be reported."""
    __slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
self.assertRaises(TypeError, copy_reg.pickle,
C, None, None)
def test_noncallable_reduce(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), "not a callable")
def test_noncallable_constructor(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), int, "not a callable")
def test_bool(self):
import copy
self.assertEqual(True, copy.copy(True))
def test_extension_registry(self):
mod, func, code = 'junk1 ', ' junk2', 0xabcd
e = ExtensionSaver(code)
try:
# Shouldn't be in registry now.
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code)
copy_reg.add_extension(mod, func, code)
# Should be in the registry.
self.assertTrue(copy_reg._extension_registry[mod, func] == code)
self.assertTrue(copy_reg._inverted_registry[code] == (mod, func))
# Shouldn't be in the cache.
self.assertNotIn(code, copy_reg._extension_cache)
# Redundant registration should be OK.
copy_reg.add_extension(mod, func, code) # shouldn't blow up
# Conflicting code.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code + 1)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code + 1)
# Conflicting module name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod[1:], func, code )
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func, code )
# Conflicting function name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func[1:], code)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func[1:], code)
# Can't remove one that isn't registered at all.
if code + 1 not in copy_reg._inverted_registry:
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func[1:], code + 1)
finally:
e.restore()
# Shouldn't be there anymore.
self.assertNotIn((mod, func), copy_reg._extension_registry)
# The code *may* be in copy_reg._extension_registry, though, if
# we happened to pick on a registered code. So don't check for
# that.
# Check valid codes at the limits.
for code in 1, 0x7fffffff:
e = ExtensionSaver(code)
try:
copy_reg.add_extension(mod, func, code)
copy_reg.remove_extension(mod, func, code)
finally:
e.restore()
# Ensure invalid codes blow up.
for code in -1, 0, 0x80000000L:
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code)
def test_slotnames(self):
self.assertEqual(copy_reg._slotnames(WithoutSlots), [])
self.assertEqual(copy_reg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copy_reg._slotnames(WithPrivate), expected)
self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
result = copy_reg._slotnames(WithInherited)
result.sort()
self.assertEqual(result, expected)
def test_main():
    # Entry point used by the regrtest harness to run this module's tests.
    test_support.run_unittest(CopyRegTestCase)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| gpl-2.0 |
ABaldwinHunter/django-clone-classic | django/views/decorators/cache.py | 586 | 2304 | from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import (
available_attrs, decorator_from_middleware_with_args,
)
def cache_page(*args, **kwargs):
    """
    Decorator for views that tries getting the page from the cache and
    populates the cache if the page isn't in the cache yet.

    The cache is keyed by the URL and some data from the headers, plus an
    optional key prefix that distinguishes cache areas in a multi-site setup
    (get_current_site().domain is a good choice, as it is unique per project).

    All headers named in the response's Vary header are also taken into
    account when caching -- exactly as the cache middleware does.
    """
    # Validate the call signature explicitly so that legacy invocation styles
    # (e.g. using cache_page directly as a decorator) produce a clear error.
    if len(args) != 1 or callable(args[0]):
        raise TypeError("cache_page has a single mandatory positional argument: timeout")
    timeout = args[0]
    alias = kwargs.pop('cache', None)
    prefix = kwargs.pop('key_prefix', None)
    if kwargs:
        raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
    decorator = decorator_from_middleware_with_args(CacheMiddleware)
    return decorator(cache_timeout=timeout, cache_alias=alias, key_prefix=prefix)
def cache_control(**kwargs):
    """
    Decorator factory that patches the response's Cache-Control header with
    the given directives (passed straight through to patch_cache_control).
    """
    def _cache_controller(viewfunc):
        @wraps(viewfunc, assigned=available_attrs(viewfunc))
        def _inner(request, *args, **kw):
            resp = viewfunc(request, *args, **kw)
            patch_cache_control(resp, **kwargs)
            return resp
        return _inner
    return _cache_controller
def never_cache(view_func):
    """
    Decorator that adds headers to a response so that it will
    never be cached.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _no_cache_view(request, *args, **kwargs):
        resp = view_func(request, *args, **kwargs)
        add_never_cache_headers(resp)
        return resp
    return _no_cache_view
| bsd-3-clause |
pybquillast/xkAddonIDE | toRecycle/CodigoEncodedURL.py | 1 | 16126 | '''
Created on 13/07/2014
@author: Alex Montes Barrios
'''
import math
class gledajfilmDecrypter:
def __init__(self, param1, param2):
_loc3_ = False;
_loc4_ = True;
self.Rcon = [1,2,4,8,16,32,64,128,27,54,108,216,171,77,154,47,94,188,99,198,151,53,106,212,179,125,250,239,197,145];
self.SBox = [99,124,119,123,242,107,111,197,48,1,103,43,254,215,171,118,202,130,201,125,250,89,71,240,173,212,162,175,156,164,114,192,183,253,147,38,54,63,247,204,52,165,229,241,113,216,49,21,4,199,35,195,24,150,5,154,7,18,128,226,235,39,178,117,9,131,44,26,27,110,90,160,82,59,214,179,41,227,47,132,83,209,0,237,32,252,177,91,106,203,190,57,74,76,88,207,208,239,170,251,67,77,51,133,69,249,2,127,80,60,159,168,81,163,64,143,146,157,56,245,188,182,218,33,16,255,243,210,205,12,19,236,95,151,68,23,196,167,126,61,100,93,25,115,96,129,79,220,34,42,144,136,70,238,184,20,222,94,11,219,224,50,58,10,73,6,36,92,194,211,172,98,145,149,228,121,231,200,55,109,141,213,78,169,108,86,244,234,101,122,174,8,186,120,37,46,28,166,180,198,232,221,116,31,75,189,139,138,112,62,181,102,72,3,246,14,97,53,87,185,134,193,29,158,225,248,152,17,105,217,142,148,155,30,135,233,206,85,40,223,140,161,137,13,191,230,66,104,65,153,45,15,176,84,187,22];
self.SBoxInverse = [82,9,106,213,48,54,165,56,191,64,163,158,129,243,215,251,124,227,57,130,155,47,255,135,52,142,67,68,196,222,233,203,84,123,148,50,166,194,35,61,238,76,149,11,66,250,195,78,8,46,161,102,40,217,36,178,118,91,162,73,109,139,209,37,114,248,246,100,134,104,152,22,212,164,92,204,93,101,182,146,108,112,72,80,253,237,185,218,94,21,70,87,167,141,157,132,144,216,171,0,140,188,211,10,247,228,88,5,184,179,69,6,208,44,30,143,202,63,15,2,193,175,189,3,1,19,138,107,58,145,17,65,79,103,220,234,151,242,207,206,240,180,230,115,150,172,116,34,231,173,53,133,226,249,55,232,28,117,223,110,71,241,26,113,29,41,197,137,111,183,98,14,170,24,190,27,252,86,62,75,198,210,121,32,154,219,192,254,120,205,90,244,31,221,168,51,136,7,199,49,177,18,16,89,39,128,236,95,96,81,127,169,25,181,74,13,45,229,122,159,147,201,156,239,160,224,59,77,174,42,245,176,200,235,187,60,131,83,153,97,23,43,4,126,186,119,214,38,225,105,20,99,85,33,12,125];
self.keySize = param1;
self.blockSize = param2;
self.roundsArray = [0,0,0,0,[0,0,0,0,10,0,12,0,14],0,[0,0,0,0,12,0,12,0,14],0,[0,0,0,0,14,0,14,0,14]];
self.shiftOffsets = [0,0,0,0,[0,1,2,3],0,[0,1,2,3],0,[0,1,3,4]];
self.Nb = param2 / 32;
self.Nk = param1 / 32;
self.Nr = self.roundsArray[self.Nk][self.Nb];
def decrypt(self,param1, param2, param3):
_loc11_ = True;
_loc12_ = False;
_loc10_ = None;
_loc4_ = []
_loc5_ = []
_loc6_ = self.hexToChars(param1);#==48 characters
_loc7_ = self.blockSize / 8;
##print self.strToChars(param2)
##print 'hexToChars',_loc6_, 'count is ',len(_loc6_);
_lo8st=self.strToChars(param2);
##print 'strToChars',_lo8st, 'count is ',len(_lo8st);
_loc8_ = self.keyExpansion(_lo8st);
##print 'keyExpansion 8', _loc8_, ' len is ', len(_loc8_);
#return 1/0
_loc9_ = (len(_loc6_) / _loc7_)-1;
#print 'loc 9 is ',_loc9_
while _loc9_ > 0:
# #print _loc9_ * _loc7_,(_loc9_ + 1) * _loc7_
_loc5_ = self.decryption(_loc6_[_loc9_ * _loc7_:(_loc9_ + 1) * _loc7_],_loc8_);
#print '16 portion',_loc5_
_loc4_=_loc5_+(_loc4_)
_loc9_-=1;
#print 'now string',_loc4_, 'count is ',len(_loc4_);
#if(param3 == 'ECB'):
##print _loc6_[0:int(_loc7_)]
#now add last stage here
_loc44= self.decryption(_loc6_[0:int(_loc7_)],_loc8_)
#print 'last 16bit',_loc44,' Count is ', len(_loc44)
_loc4_ =_loc44+_loc4_;
#print 'NOW _loc4_ string',_loc4_, 'count is ',len(_loc4_);
_loc4_= self.charsToStr(_loc4_);
_loop_=0;
_patternArray=[];
_finalString= "http://allmyvideos.net/9b7ccumgfrui";
#while( _loop_<len(_finalString)):
# _patternArray.append(ord(_finalString[_loop_]) - ord(_loc4_[_loop_]));
# _loop_+=1;
##print 'Pattern is ',_patternArray
#
#__Pattern = [-16, 54, 78, 13, 16, -152, 40, -121, 48, 36, -88, 33, 97, 45, -58,-128, 41, -41, -22, -58, -97, 24, -164, -64, 97, -169, -69, -46, -126, -55, 19,14, 79, 53, -11]
_loop_=0
#_loc4_=list(_loc4_);
#while( _loop_<len(__Pattern)):
# #print chr( ord(_loc4_[_loop_]) + __Pattern[_loop_]);
# _loc4_[_loop_]= chr( ord(_loc4_[_loop_]) + __Pattern[_loop_]);
# _loop_+=1;
#_loc4_="".join(_loc4_)
return _loc4_;
def MyInt(self,x):
x = 0xffffffff & x
if x > 0x7fffffff :
return - ( ~(x - 1) & 0xffffffff )
else : return x
def keyExpansion(self,param1):
_loc5_ = True
_loc6_ = False
_loc4_ = None
_loc2_ = 0
self.Nk = self.keySize / 32;# =6, what if this was 5
self.Nb = self.blockSize / 32;
_loc3_ = [];
self.Nr = self.roundsArray[self.Nk][self.Nb];# ==12, what if this was 10?
_loc4_ = 0;
##print 'Key param1 is',param1
#print self.Nr,1,self.Nb, self.Nk
_loc3_=[0]*(self.Nb * (self.Nr + 1))
#param1=param1+[0,0,0,0]
##print len(_loc3_);
##print _loc3_
while _loc4_ < self.Nk:
# #print self.Nk
# #print _loc4_
# #print param1
# #print param1[4 * _loc4_ + 3] << 24;
if (_loc4_)<len(param1)/4:
_loc3_[_loc4_] = param1[4 * _loc4_] | param1[4 * _loc4_ + 1] << 8 | param1[4 * _loc4_ + 2] << 16 | param1[4 * _loc4_ + 3] << 24;
_loc4_+=1;
_loc4_ = self.Nk;
while _loc4_ < self.Nb * (self.Nr + 1):
_loc2_ = _loc3_[_loc4_-1];
# #print 'val for loc4',_loc4_, _loc2_
if(_loc4_ % self.Nk == 0):
# #print 'here',(self.SBox[_loc2_ >> 8 & 255] | self.SBox[_loc2_ >> 16 & 255] << 8 | self.SBox[_loc2_ >> 24 & 255] << 16 | self.SBox[_loc2_ & 255] << 24)
##print (self.SBox[_loc2_ >> 8 & 255] | self.SBox[_loc2_ >> 16 & 255] << 8 | self.SBox[_loc2_ >> 24 & 255] << 16 | self.SBox[_loc2_ & 255] << 24)
##print math.floor(_loc4_ / self.Nk)-1
_loc2_ = (self.SBox[_loc2_ >> 8 & 255] | self.SBox[_loc2_ >> 16 & 255] << 8 | self.SBox[_loc2_ >> 24 & 255] << 16 | self.SBox[_loc2_ & 255] << 24) ^ self.Rcon[int(math.floor(_loc4_ / self.Nk))-1];
else:
if(self.Nk > 6 and _loc4_ % self.Nk == 4):
_loc2_ = self.SBox[_loc2_ >> 24 & 255] << 24 | self.SBox[_loc2_ >> 16 & 255] << 16 | self.SBox[_loc2_ >> 8 & 255] << 8 | self.SBox[_loc2_ & 255];
# #print 'val is ',self.MyInt(_loc3_[_loc4_ - self.Nk] ^ _loc2_)
_loc3_[_loc4_] = self.MyInt(_loc3_[_loc4_ - self.Nk] ^ _loc2_)
_loc4_+=1;
return _loc3_;
def hexToChars(self,param1):
_loc4_ = False;
_loc5_ = True;
_loc2_ = []
_loc3_ =0;
if param1[0:1] == '0x':
_loc3_ =2;
while _loc3_ < len(param1):
# #print param1[_loc3_:_loc3_+2]
_loc2_.append(int(param1[_loc3_:_loc3_+2],16));
_loc3_ = _loc3_ + 2;
return _loc2_;
def strToChars(self,param1):
_loc4_ = True;
_loc5_ = False;
_loc2_ = []
_loc3_ = 0;
##print 'p1 is',param1,' and len is ', len(param1)
while(_loc3_ < len(param1)):
# #print param1[_loc3_]
_loc2_.append(ord(param1[_loc3_]));
_loc3_+=1;
return _loc2_;
def charsToStr(self,param1):
_loc4_ = False;
_loc5_ = True;
_loc2_ = ''
_loc3_ = 0;
while(_loc3_ < len(param1)):
_loc2_ = _loc2_ + chr(param1[_loc3_])
_loc3_+=1;
return _loc2_;
def packBytes(self,param1):
_loc4_ = False;
_loc5_ = True;
_loc2_ = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
# _loc2_[0] = []
# _loc2_[1] = []
# _loc2_[2] = []
# _loc2_[3] = [];
_loc3_ = 0;
##print len(param1)
while(_loc3_ < len(param1)):
_loc2_[0][_loc3_ / 4] = param1[_loc3_];
_loc2_[1][_loc3_ / 4] = param1[_loc3_ + 1];
_loc2_[2][_loc3_ / 4] = param1[_loc3_ + 2];
_loc2_[3][_loc3_ / 4] = param1[_loc3_ + 3];
_loc3_ = _loc3_ + 4;
return _loc2_;
def unpackBytes(self,param1):
_loc4_= False;
_loc5_ = True;
_loc2_ = []
_loc3_ = 0;
#print 'unpackBytesval is is ',param1
while(_loc3_ < len(param1[0])):
_loc2_.append( param1[0][_loc3_]);
_loc2_.append(param1[1][_loc3_]);
_loc2_.append(param1[2][_loc3_]);
_loc2_.append(param1[3][_loc3_]);
_loc3_+=1;
return _loc2_;
def InverseRound(self,param1, param2):
_loc3_ = False;
_loc4_ = True;
#print 'Ircound is',param1,param2
self.addRoundKey(param1,param2);
#print 'Ircound back is',param1,param2
self.mixColumn(param1,'decrypt');
self.shiftRow(param1,'decrypt');
self.byteSub(param1,'decrypt');
def FinalRound(self,param1, param2):
_loc3_ = False;
_loc4_ = True;
self.byteSub(param1,'encrypt');
self.shiftRow(param1,'encrypt');
self.addRoundKey(param1,param2);
def InverseFinalRound(self,param1, param2):
_loc3_ = False;
_loc4_ = True;
self.addRoundKey(param1,param2);
self.shiftRow(param1,'decrypt');
##print 'InverseFinalRound byteSubbefore',param1
self.byteSub(param1,'decrypt');
##print 'InverseFinalRound byteSub after',param1
def addRoundKey(self,param1, param2):
_loc4_ = True;
_loc5_ = False;
_loc3_ = 0;
#print 'addRoundKeys is', param1,param2
while(_loc3_ < self.Nb):
#print param1[0][_loc3_] , param2[_loc3_] & 255;
param1[0][_loc3_] = self.MyInt(param1[0][_loc3_] ^ (param2[_loc3_] & 255));
param1[1][_loc3_] = param1[1][_loc3_] ^ param2[_loc3_] >> 8 & 255;
param1[2][_loc3_] = param1[2][_loc3_] ^ param2[_loc3_] >> 16 & 255;
param1[3][_loc3_] = param1[3][_loc3_] ^ param2[_loc3_] >> 24 & 255;
_loc3_+=1;
def shiftRow(self,param1, param2):
_loc4_ = True;
_loc5_ = False;
_loc3_ = 1;
##print'#print p1 is ',param1,'p2 is ', param2
while(_loc3_ < 4):
if(param2 == 'encrypt'):
param1[_loc3_] = self.cyclicShiftLeft(param1[_loc3_],self.shiftOffsets[self.Nb][_loc3_]);
else:
##print 'self nb is,',self.Nb,'offsets are' ,self.Nb- self.shiftOffsets[self.Nb][_loc3_]
param1[_loc3_] = self.cyclicShiftLeft(param1[_loc3_],self.Nb - self.shiftOffsets[self.Nb][_loc3_]);
_loc3_+=1;
##print'aaa#print p1 is ',param1,'p2 is ', param2
def cyclicShiftLeft(self,param1, param2):
_loc4_ = False;
_loc5_ = True;
_loc3_ = param1[0:param2];
##print 'loc3 is'
##print _loc3_
##print 'param1 is'
##print param1
param1=param1[param2:];
param1.extend(_loc3_);
#print ' cyclicShiftLeft val is', param1
return param1;
def decryption(self,param1, param2):
    # Full inverse cipher: pack the input bytes into the 4 x Nb state,
    # undo the final round with the last round-key slice, run the
    # inverse rounds Nr-1 .. 1, apply the initial round key, and
    # unpack the state back to a flat value.
    # param2: expanded key schedule, consumed in Nb-word slices.
    # NOTE(review): packBytes/unpackBytes/InverseRound are defined
    # elsewhere in the class -- presumably byte-list <-> state
    # conversions and the per-round inverse; confirm there.
    _loc4_ = True;   # unused locals left over from decompilation
    _loc5_ = False;
    #print param1
    param1 = self.packBytes(param1);
    self.InverseFinalRound(param1,param2[self.Nb * self.Nr:]);# nb*nr=42
    ##print param1
    _loc3_ = self.Nr-1;
    while(_loc3_ > 0):
        self.InverseRound(param1,param2[(self.Nb * _loc3_):self.Nb * (_loc3_ + 1)]);
        _loc3_-=1;
    #print 'addRoundKey', param1,param2
    self.addRoundKey(param1,param2);
    reVal=self.unpackBytes(param1);
    #print ' decryption reVal',param1, reVal
    return reVal;
def byteSub(self,param1, param2):
    # S-box substitution: replace every byte of the 4 x Nb state, in
    # place, with its table lookup -- self.SBox for 'encrypt', the
    # inverse table for anything else ('decrypt').
    _loc6_ = False;  # unused locals left over from decompilation
    _loc7_ = True;
    _loc3_ = 0;      # will hold the chosen substitution table
    _loc5_ = 0;      # column index
    if(param2 == 'encrypt'):
        _loc3_ = self.SBox;
    else:
        _loc3_ = self.SBoxInverse;
    _loc4_ = 0;      # row index (state always has 4 rows)
    while(_loc4_ < 4):
        _loc5_ = 0;
        ##print _loc4_
        while(_loc5_ < self.Nb):
            param1[_loc4_][_loc5_] = _loc3_[param1[_loc4_][_loc5_]];
            _loc5_+=1;
        _loc4_+=1;
def mixColumn(self,param1, param2):
    # AES-style MixColumns: each state column is multiplied (in
    # GF(2^8), via self.mult_GF256) by the fixed circulant matrix --
    # coefficients (2,3,1,1) for 'encrypt' and (14,11,13,9) for
    # decryption -- then written back in place. A temporary 4-entry
    # buffer holds the new column so reads use the original values.
    _loc6_ = False;  # unused locals left over from decompilation
    _loc7_ = True;
    _loc4_ = 0;            # row index
    _loc3_ = [0,0,0,0];    # scratch column
    _loc5_ = 0;            # column index
    #print 'mixColumn is',param1, param2
    while(_loc5_ < self.Nb):
        _loc4_ = 0;
        while(_loc4_ < 4):
            if(param2 == "encrypt"):
                _loc3_[_loc4_] = self.mult_GF256(param1[_loc4_][_loc5_],2) ^ self.mult_GF256(param1[(_loc4_ + 1) % 4][_loc5_],3) ^ param1[(_loc4_ + 2) % 4][_loc5_] ^ param1[(_loc4_ + 3) % 4][_loc5_];
            else:
                _loc3_[_loc4_] = self.mult_GF256(param1[_loc4_][_loc5_],14) ^ self.mult_GF256(param1[(_loc4_ + 1) % 4][_loc5_],11) ^ self.mult_GF256(param1[(_loc4_ + 2) % 4][_loc5_],13) ^ self.mult_GF256(param1[(_loc4_ + 3) % 4][_loc5_],9);
            _loc4_+=1;
        _loc4_ = 0;
        # copy the computed column back into the state
        while(_loc4_ < 4):
            param1[_loc4_][_loc5_] = _loc3_[_loc4_];
            _loc4_+=1;
        _loc5_+=1;
def xtime(self, param1):
    """Multiply a GF(2^8) element by x (i.e. by 2).

    Doubles the value and, when the result overflows 8 bits, reduces
    it modulo the Rijndael polynomial 0x11B (decimal 283).
    """
    doubled = param1 << 1
    return doubled ^ 283 if doubled & 256 else doubled
def mult_GF256(self, param1, param2):
    """Multiply two GF(2^8) elements via shift-and-add.

    Walks the bits of ``param1`` from least to most significant; for
    every set bit the matching power-of-x multiple of ``param2``
    (maintained with ``self.xtime``) is XORed into the product.
    """
    product = 0
    bit = 1
    while bit < 256:
        if param1 & bit:
            product ^= param2
        bit <<= 1
        param2 = self.xtime(param2)
    return product
def hexToChars(param1):
    """Decode a hex string (optionally prefixed with '0x') into the
    string of characters whose codes are the successive byte values.

    Bug fixes versus the previous version:
    * the prefix test compared the ONE-character slice ``param1[0:1]``
      against the two-character string ``'0x'`` and so never matched;
    * each byte was sliced one digit wide (``pos:pos+1``) while the
      cursor advanced by two, and the int was appended directly, which
      made ``"".join()`` raise TypeError on non-string elements.
    """
    chars = []
    pos = 0
    # Skip an optional '0x' prefix.
    if param1[0:2] == '0x':
        pos = 2
    while pos < len(param1):
        # Each byte is two hex digits.
        chars.append(chr(int(param1[pos:pos + 2], 16)))
        pos += 2
    return "".join(chars)
def arrNametoString(param1):
    """Reverse ``param1`` in place, then return its byte values joined
    into a string of characters (last element of the input first).
    """
    param1.reverse()
    return "".join(chr(code) for code in param1)
#df236814880713e784e099b26a27569fb9891e1e1a5a32a56df1a33b5a68373014ed2e4a02be5bdb415663799435e606
#df236814880713e784e099b26a27569fb9891e1e1a5a32a56df1a33b5a68373014ed2e4a02be5bdb415663799435e606
| gpl-3.0 |
wolffcm/voltdb | tools/getgitinfo.py | 26 | 3846 | #!/usr/bin/python
from subprocess import Popen, PIPE
import sys
import re
def getSvnInfo():
    """Make a build string for svn
    Returns a string or None, if not in an svn repository"""
    # NOTE(review): gitHash is initialised but never used, and
    # 'revision' is only bound when an "Revision: " line appears in
    # the `svn info` output -- confirm that is guaranteed, otherwise
    # the final return raises NameError.
    (urlbase, gitHash, dirty) = ("","","")
    (svnStatus,stderr) = Popen("svn status", shell=True,stdout=PIPE, stderr=PIPE).communicate()
    # svn status returns an error if you're not in a repository
    if stderr:
        print "This not an svn working copy"
        return
    # Any output from `svn status` (modified or unversioned files)
    # marks the build as dirty.
    if svnStatus:
        print "This is a dirty svn working copy"
        dirty = "-dirty"
    (svnInfo,stderr) = Popen("svn info 2>/dev/null",shell=True, stdout=PIPE).communicate()
    # Parse the "Key: value" lines of `svn info` for URL and Revision.
    for line in str.splitlines(svnInfo):
        if not len(line):
            continue
        (k, v) = line.split(": ")
        if k == "URL": urlbase = v
        if k == "Revision": revision = v
    return "%s?revision=%s%s" % (urlbase, revision, dirty)
def getGitInfo():
    """Make a build string for git.
    Returns a string like "<describe-output>[-local]" or None if not
    in a git repository."""
    (gitLocalVersion, local) = ("","")
    # need to do a 'git diff' because 'describe --dirty' can get confused by timestamps
    (gitLocalVersion,stderr) = Popen("git diff --shortstat", shell=True, stdout=PIPE, stderr=PIPE).communicate()
    if stderr:
        print "This is not a git working tree\n"
        return
    # git describe --dirty adds '-dirty' to the version string if uncommitted code is found
    (gitLocalVersion,stderr) = Popen("git describe --long --dirty", shell=True, stdout=PIPE, stderr=PIPE).communicate()
    if stderr:
        print "This is not a git working tree\n"
        return
    # jenkins puts in local tags - look backwards until a non-jenkins tag is found
    while gitLocalVersion[:7] == "jenkins":
        gitLocalVersion = gitLocalVersion.strip()
        # strip a trailing '-dirty' before walking back one commit
        if gitLocalVersion[len(gitLocalVersion)-6:] == "-dirty":
            gitLocalVersion = gitLocalVersion[:len(gitLocalVersion)-6]
        (gitLocalVersion,stderr) = Popen("git describe --long %s^1" % gitLocalVersion,
            shell=True, stdout=PIPE, stderr=PIPE).communicate()
        if stderr:
            print stderr
            break
    gitLocalVersion = gitLocalVersion.strip()
    #check if local repository == remote repository
    (gitLocalBranch, stderr) = Popen("git name-rev --name-only HEAD",
        shell=True, stdout=PIPE, stderr=PIPE).communicate()
    gitLocalBranch = gitLocalBranch.strip()
    #print "git config --get branch.%s.remote" % (gitLocalBranch)
    (gitRemote, stderr) = Popen("git config --get branch.%s.remote" % (gitLocalBranch),
        shell=True, stdout=PIPE, stderr=PIPE).communicate()
    gitRemote = gitRemote.strip()
    if not gitRemote: #if there is no remote, then this is a local-only branch
        local = "-local"
    else: # if there is a remote branch, see if it has the same hash
        (gitRemoteVersion, stderr) = Popen("git describe %s" % gitRemote ,
            shell=True, stdout=PIPE, stderr=PIPE).communicate()
        gitRemoteVersion = gitRemoteVersion.strip()
        # the local describe output is longer (--long); compare prefixes
        if gitRemoteVersion != gitLocalVersion[:len(gitRemoteVersion)]:
            local = "-local"
    return "%s%s" % (gitLocalVersion, local)
if __name__ == "__main__":
    # Optional first CLI argument is the version string to prepend;
    # defaults to "0.0.0". Tries git first, then svn, then gives up.
    buildstring = None
    version = "0.0.0"
    if (len(sys.argv) > 1):
        version = sys.argv[1]
    buildstring = getGitInfo()
    if not buildstring:
        buildstring = getSvnInfo()
    if not buildstring:
        buildstring = "This is not from a known repository"
    # Write "<version> <buildstring>" to buildstring.txt for the build
    # system to pick up, and echo it to stdout.
    bfile = open("buildstring.txt", "w")
    bfile.write ("%s %s\n"% (version,buildstring))
    bfile.close()
    print "Version: ",version,buildstring
| agpl-3.0 |
vv1133/home_web | django/contrib/auth/tokens.py | 296 | 2631 | from datetime import date
from django.conf import settings
from django.utils.http import int_to_base36, base36_to_int
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils import six
class PasswordResetTokenGenerator(object):
    """
    Strategy object used to generate and check tokens for the password
    reset mechanism.

    Tokens have the form "<days-base36>-<truncated-hmac>" and become
    invalid once the password or last_login changes, or after
    settings.PASSWORD_RESET_TIMEOUT_DAYS.
    """
    def make_token(self, user):
        """
        Returns a token that can be used once to do a password reset
        for the given user.
        """
        return self._make_token_with_timestamp(user, self._num_days(self._today()))

    def check_token(self, user, token):
        """
        Check that a password reset token is correct for a given user.
        Returns True only if the token parses, its HMAC matches, and it
        has not expired.
        """
        # Parse the token
        try:
            ts_b36, hash = token.split("-")
        except ValueError:
            return False

        try:
            ts = base36_to_int(ts_b36)
        except ValueError:
            return False

        # Check that the timestamp/uid has not been tampered with.
        # constant_time_compare avoids leaking how much of the HMAC
        # matched via timing.
        if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
            return False

        # Check the timestamp is within limit
        if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
            return False

        return True

    def _make_token_with_timestamp(self, user, timestamp):
        # timestamp is number of days since 2001-1-1.  Converted to
        # base 36, this gives us a 3 digit string until about 2121
        ts_b36 = int_to_base36(timestamp)

        # By hashing on the internal state of the user and using state
        # that is sure to change (the password salt will change as soon as
        # the password is set, at least for current Django auth, and
        # last_login will also change), we produce a hash that will be
        # invalid as soon as it is used.
        # We limit the hash to 20 chars to keep URL short
        key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"

        # Ensure results are consistent across DB backends:
        # microseconds and timezone info are dropped before hashing.
        login_timestamp = user.last_login.replace(microsecond=0, tzinfo=None)

        value = (six.text_type(user.pk) + user.password +
                six.text_type(login_timestamp) + six.text_type(timestamp))
        # [::2] keeps every other hex digit -> 20 chars from a 40-char
        # SHA-1 hexdigest.
        hash = salted_hmac(key_salt, value).hexdigest()[::2]
        return "%s-%s" % (ts_b36, hash)

    def _num_days(self, dt):
        # Days elapsed since the 2001-1-1 epoch used by the token.
        return (dt - date(2001, 1, 1)).days

    def _today(self):
        # Used for mocking in tests
        return date.today()

default_token_generator = PasswordResetTokenGenerator()
| bsd-3-clause |
accomac/namebench | nb_third_party/dns/rdtypes/IN/PX.py | 248 | 3792 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class PX(dns.rdata.Rdata):
    """PX record.
    @ivar preference: the preference value
    @type preference: int
    @ivar map822: the map822 name
    @type map822: dns.name.Name object
    @ivar mapx400: the mapx400 name
    @type mapx400: dns.name.Name object
    @see: RFC 2163"""

    __slots__ = ['preference', 'map822', 'mapx400']

    def __init__(self, rdclass, rdtype, preference, map822, mapx400):
        super(PX, self).__init__(rdclass, rdtype)
        self.preference = preference
        self.map822 = map822
        self.mapx400 = mapx400

    def to_text(self, origin=None, relativize=True, **kw):
        # Render "<preference> <map822> <mapx400>" with names made
        # relative to origin when requested.
        map822 = self.map822.choose_relativity(origin, relativize)
        mapx400 = self.mapx400.choose_relativity(origin, relativize)
        return '%d %s %s' % (self.preference, map822, mapx400)

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse the zone-file form: a uint16 preference then two names.
        preference = tok.get_uint16()
        map822 = tok.get_name()
        map822 = map822.choose_relativity(origin, relativize)
        mapx400 = tok.get_name(None)
        mapx400 = mapx400.choose_relativity(origin, relativize)
        tok.get_eol()
        return cls(rdclass, rdtype, preference, map822, mapx400)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Wire form: 2-byte big-endian preference, then the two
        # uncompressed domain names.
        pref = struct.pack("!H", self.preference)
        file.write(pref)
        self.map822.to_wire(file, None, origin)
        self.mapx400.to_wire(file, None, origin)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        # Consume the preference, then both names, verifying that the
        # names exactly fill the remaining rdlen.
        (preference, ) = struct.unpack('!H', wire[current : current + 2])
        current += 2
        rdlen -= 2
        (map822, cused) = dns.name.from_wire(wire[: current + rdlen],
            current)
        if cused > rdlen:
            raise dns.exception.FormError
        current += cused
        rdlen -= cused
        if not origin is None:
            map822 = map822.relativize(origin)
        (mapx400, cused) = dns.name.from_wire(wire[: current + rdlen],
            current)
        if cused != rdlen:
            raise dns.exception.FormError
        if not origin is None:
            mapx400 = mapx400.relativize(origin)
        return cls(rdclass, rdtype, preference, map822, mapx400)

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        self.map822 = self.map822.choose_relativity(origin, relativize)
        self.mapx400 = self.mapx400.choose_relativity(origin, relativize)

    def _cmp(self, other):
        # Ordering: preference first (compared in wire form), then
        # map822, then mapx400. NOTE(review): uses the Python 2
        # builtin cmp(); this module is py2-only as written.
        sp = struct.pack("!H", self.preference)
        op = struct.pack("!H", other.preference)
        v = cmp(sp, op)
        if v == 0:
            v = cmp(self.map822, other.map822)
            if v == 0:
                v = cmp(self.mapx400, other.mapx400)
        return v
| apache-2.0 |
markeTIC/OCB | addons/hr_payroll/report/report_payslip.py | 377 | 1982 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.report import report_sxw
class payslip_report(report_sxw.rml_parse):
    """RML parser for the payslip report.

    Exposes ``get_payslip_lines`` to the report template via the
    render context.
    """

    def __init__(self, cr, uid, name, context):
        super(payslip_report, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            'get_payslip_lines': self.get_payslip_lines,
        })

    def get_payslip_lines(self, obj):
        """Return browse records for the lines of ``obj`` (a sequence
        of hr.payslip.line records) flagged to appear on the payslip,
        or an empty list when there are none.
        """
        payslip_line = self.pool.get('hr.payslip.line')
        # Comprehension replaces the old index loop, which shadowed the
        # builtin `id` and compared `appears_on_payslip is True`.
        ids = [line.id for line in obj if line.appears_on_payslip]
        if ids:
            return payslip_line.browse(self.cr, self.uid, ids)
        return []
class wrapped_report_payslip(osv.AbstractModel):
    """Registers the hr_payroll.report_payslip QWeb report and binds
    it to the payslip_report rml parser."""
    _name = 'report.hr_payroll.report_payslip'
    _inherit = 'report.abstract_report'
    # QWeb template rendered by this report.
    _template = 'hr_payroll.report_payslip'
    _wrapped_report_class = payslip_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sylarcp/anita | venv/lib/python2.7/site-packages/pip/_vendor/requests/api.py | 188 | 5419 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    r"""Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # A throwaway Session is created per call; the 'with' statement
    # guarantees it is closed even when the request raises, so no
    # sockets are left open (which would otherwise surface as
    # ResourceWarnings or an apparent memory leak).
    session = sessions.Session()
    with session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GET requests follow redirects by default; an explicit caller
    # value always wins.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Sends a OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # OPTIONS follows redirects by default, like GET.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike GET, HEAD does NOT follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    """Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin delegator: all real work happens in request().
    return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    """Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin delegator: all real work happens in request().
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    """Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin delegator: all real work happens in request().
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    """Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin delegator: all real work happens in request().
    return request('delete', url, **kwargs)
| mit |
eduNEXT/edunext-platform | common/test/acceptance/tests/lms/test_lms_index.py | 4 | 2263 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Index page (aka, Home page). Note that this is different than
what students see @ edx.org because we redirect requests to a separate web application.
"""
import datetime
from common.test.acceptance.pages.lms.index import IndexPage
from common.test.acceptance.tests.helpers import AcceptanceTest
class BaseLmsIndexTest(AcceptanceTest):
    """ Base test suite for the LMS Index (Home) page """

    def setUp(self):
        """
        Initializes the components (page objects, courses, users) for this test suite
        """
        # Some state is constructed by the parent setUp() routine
        # (presumably self.browser among it -- provided by
        # AcceptanceTest, confirm there).
        super(BaseLmsIndexTest, self).setUp()

        # Load page objects for use by the tests
        self.page = IndexPage(self.browser)

        # Navigate to the index page and get testing!
        self.page.visit()
class LmsIndexPageTest(BaseLmsIndexTest):
    """ Test suite for the LMS Index (Home) page """
    shard = 2

    def setUp(self):
        super(LmsIndexPageTest, self).setUp()
        # Useful to capture the current datetime for our tests
        self.now = datetime.datetime.now()

    def test_index_basic_request(self):
        """
        Perform a general validation of the index page, renders normally, no exceptions raised, etc.
        """
        # Banner must be visible and the footer must carry exactly the
        # expected links, in order.
        self.assertTrue(self.page.banner_element.visible)
        expected_links = [u'About', u'Blog', u'News', u'Help Center', u'Contact', u'Careers', u'Donate']
        self.assertEqual(self.page.footer_links, expected_links)

    def test_intro_video_hidden_by_default(self):
        """
        Confirm that the intro video is not displayed when using the default configuration
        """
        # Ensure the introduction video element is not shown
        self.assertFalse(self.page.intro_video_element.visible)

        # Still need to figure out how to swap platform settings in the context of a bok choy test
        # but we can at least prevent accidental exposure with these validations going forward
        # Note: 'present' is a DOM check, whereas 'visible' is an actual browser/screen check
        self.assertFalse(self.page.video_modal_element.present)
        self.assertFalse(self.page.video_modal_element.visible)
| agpl-3.0 |
maxsocl/django | django/contrib/flatpages/forms.py | 143 | 2036 | from django import forms
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext, ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
    """Admin form for FlatPage: validates the URL's character set and
    slashes, and enforces per-site URL uniqueness."""

    url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$',
        help_text=_("Example: '/about/contact/'. Make sure to have leading"
                    " and trailing slashes."),
        error_messages={
            "invalid": _("This value must contain only letters, numbers,"
                         " dots, underscores, dashes, slashes or tildes."),
        },
    )

    class Meta:
        model = FlatPage
        fields = '__all__'

    def clean_url(self):
        url = self.cleaned_data['url']
        # A flatpage URL must always start with a slash.
        if not url.startswith('/'):
            raise forms.ValidationError(
                ugettext("URL is missing a leading slash."),
                code='missing_leading_slash',
            )
        # The trailing slash is only required when APPEND_SLASH plus
        # CommonMiddleware are active -- otherwise the redirect they
        # perform would make a slash-less flatpage unreachable.
        if (settings.APPEND_SLASH and
                'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE_CLASSES and
                not url.endswith('/')):
            raise forms.ValidationError(
                ugettext("URL is missing a trailing slash."),
                code='missing_trailing_slash',
            )
        return url

    def clean(self):
        url = self.cleaned_data.get('url', None)
        sites = self.cleaned_data.get('sites', None)

        # Reject the form when another flatpage (excluding the one
        # being edited) already uses this URL on any selected site;
        # the inner loop pins the error message to the offending site.
        same_url = FlatPage.objects.filter(url=url)
        if self.instance.pk:
            same_url = same_url.exclude(pk=self.instance.pk)

        if sites and same_url.filter(sites__in=sites).exists():
            for site in sites:
                if same_url.filter(sites=site).exists():
                    raise forms.ValidationError(
                        _('Flatpage with url %(url)s already exists for site %(site)s'),
                        code='duplicate_url',
                        params={'url': url, 'site': site},
                    )

        return super(FlatpageForm, self).clean()
| bsd-3-clause |
leezu/mxnet | tests/python/gpu/test_device.py | 7 | 2564 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import pytest
import os
import logging
from mxnet.test_utils import environment
# Shapes exercised by the push/pull test. Note: `(10)` etc. are plain
# ints (parentheses alone don't make a tuple); mx.nd.ones accepts both
# ints and tuples, so behavior is unchanged.
shapes = [(10), (100), (1000), (10000), (100000), (2, 2), (2, 3, 4, 5, 6, 7, 8)]
keys = [1, 2, 3, 4, 5, 6, 7]

num_gpus = mx.context.num_gpus()
if num_gpus > 8:
    # PCI-E hardware limits P2P peer groups to 8 devices, so cap the
    # test there. logging.warn is a deprecated alias -- use warning().
    logging.warning("The machine has {} gpus. We will run the test on 8 gpus.".format(num_gpus))
    logging.warning("There is a limit for all PCI-E hardware on creating number of P2P peers. The limit is 8.")
    num_gpus = 8
gpus = range(1, 1 + num_gpus)
@pytest.mark.skipif(mx.context.num_gpus() < 1, reason="test_device_pushpull needs at least 1 GPU")
def test_device_pushpull():
    # For every shape/key pair and GPU count, push a ones-array from
    # each GPU and verify the pulled aggregate equals n_gpus on every
    # device, under each combination of the tree-reduction env vars.
    def check_dense_pushpull(kv_type):
        for shape, key in zip(shapes, keys):
            for n_gpus in gpus:
                kv_device = mx.kv.create(kv_type)
                a = mx.nd.ones(shape, mx.gpu(0))
                # Key is made unique per (key, n_gpus) combination so
                # a fresh kvstore entry is initialised each time.
                cur_key = str(key*max(gpus)+n_gpus)
                kv_device.init(cur_key, a)
                arr_list = [mx.nd.ones(shape, mx.gpu(x)) for x in range(n_gpus)]
                res = [mx.nd.zeros(shape, mx.gpu(x)) for x in range(n_gpus)]
                kv_device.push(cur_key, arr_list)
                kv_device.pull(cur_key, res)
                # Sum of n_gpus ones-arrays must be exactly n_gpus
                # everywhere on every participating device.
                for x in range(n_gpus):
                    assert(np.sum(np.abs((res[x]-n_gpus).asnumpy()))==0)

    # None means "leave the variable unset" (see environment()).
    kvstore_tree_array_bound_values = [None, '1']
    kvstore_usetree_values = [None, '1']
    for y in kvstore_tree_array_bound_values:
        for x in kvstore_usetree_values:
            with environment({'MXNET_KVSTORE_USETREE': x,
                              'MXNET_KVSTORE_TREE_ARRAY_BOUND': y}):
                check_dense_pushpull('local')
                check_dense_pushpull('device')

if __name__ == '__main__':
    test_device_pushpull()
| apache-2.0 |
abhiver222/perkt | face_recognition.py | 2 | 5724 | import cv2
import sys
#import matplotlib.pyplot as pt
import numpy as np
import numpy.linalg as la
import math as mt
# NOTE(review): this section contained unresolved git merge-conflict
# markers (<<<<<<< / ======= / >>>>>>>) that made the file
# unparseable, plus the invalid numeric literal 7052.4.3. Resolved by
# keeping the updated norm tables, the recognizeFace(image, faces)
# argument order used below, and guarding the module-level run.

# Content of our eigens.
# Seven reference images per person; each entry is the Frobenius norm
# of one pose:
#   1) smiling  2) sad  3) serious  4) blank
#   5) without glasses (if the person wears any)
#   6) looking left  7) looking right
ournorms = {'Abhishek': [5866.278, 6229.924, 6123.536, 5988.862, 5966.183, 5990.367, 5661.118],
            'Akshay': [6748.139, 5658.617, 6238.200, 6671.678, 6228.899, 6167.573, 5830.901],
            'Chris': [6312.924, 6374.821, 6465.274, 6275.596, 6596.240, 6382.099, 6456.81],
            'Tim': [6226.022, 6010.737, 6107.618, 6107.386, 5994.380, 5916.834, 7052.43]}

# Per-person tolerance around each reference norm.
indbuffervals = {'Abhishek': 100,
                 'Akshay': 100,
                 'Chris': 50,
                 'Tim': 150}


def recognizeFace(image, faces):
    """Run the norm-based matcher on every detected face rectangle.

    Returns (all_known, face_count). Bails out early, treating the
    scene as "known", when more than 10 faces are found.
    """
    retval = True
    if len(faces) > 10:
        print("Fuck it too many faces shoot everyone")
        return True, 100
    for i in range(faces.shape[0]):
        x, y, w, h = faces[i]
        # Pad the detection rectangle out to a 400x400 crop.
        # Integer division keeps the slice indices ints on Python 3
        # (py2's `/` on ints behaved the same way).
        bufw = (400 - w) // 2
        bufh = (400 - h) // 2
        inmod = image[y - bufw:y + w + bufw, x - bufh:x + h + bufh]
        retval = checker(inmod) and retval
    return retval, len(faces)


def checker(inmod):
    """Return True when the Frobenius norm of the face crop falls
    within the per-person tolerance of any stored reference norm."""
    tempnorm = la.norm(inmod)
    retval = False
    for name, vals in ournorms.items():
        for refnorm in vals:
            if np.abs(refnorm - tempnorm) < indbuffervals[name]:
                retval = True
                print("is")
                print(name)
                break
        if retval:
            break
        print("not")
        print(name)
    return retval


def check(image):
    """Detect faces in the image file at path ``image``, draw boxes,
    run recognition, and return False when an intruder is detected."""
    imagePath = image
    cascPath = "haarcascade_frontalface_default.xml"

    # Create the haar cascade and load the image.
    faceCascade = cv2.CascadeClassifier(cascPath)
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image.
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.25,
        minNeighbors=5,
        minSize=(40, 40)
    )
    print("Found {0} faces!".format(len(faces)))

    # Draw a rectangle around the faces.
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    what = True
    if len(faces) > 0:
        what, number = recognizeFace(image, faces)
    if what is False:
        print("intruder detected")
    cv2.imshow("Faces found", image)
    return what


if __name__ == '__main__':
    # Previously ran unconditionally at import time; guarded so the
    # module can be imported without command-line arguments.
    check(sys.argv[1])
| mit |
SatoshiNXSimudrone/sl4a-damon-clone | python-build/python-libs/gdata/src/atom/mock_http.py | 278 | 4474 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
    """Base exception for this mock HTTP client module."""
    pass


class NoRecordingFound(Error):
    """Raised when no stored request/response pair matches a request
    and no real client is available to serve it."""
    pass
class MockRequest(object):
    """Holds parameters of an HTTP request for matching against future requests.
    """

    def __init__(self, operation, url, data=None, headers=None):
        self.operation = operation
        # String URLs are normalised to atom.url objects so that later
        # equality comparisons in MockHttpClient.request work on parsed
        # URLs rather than raw strings.
        if isinstance(url, (str, unicode)):
            url = atom.url.parse_url(url)
        self.url = url
        self.data = data
        self.headers = headers
class MockResponse(atom.http_interface.HttpResponse):
    """Simulates an httplib.HTTPResponse object."""

    def __init__(self, body=None, status=None, reason=None, headers=None):
        # A file-like body is drained immediately so the stored
        # response can be replayed any number of times.
        if body and hasattr(body, 'read'):
            self.body = body.read()
        else:
            self.body = body
        if status is not None:
            self.status = int(status)
        else:
            self.status = None
        self.reason = reason
        self._headers = headers or {}

    def read(self):
        # Mirrors httplib's read(); always returns the full stored body.
        return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
    def __init__(self, headers=None, recordings=None, real_client=None):
        """An HttpClient which responds to request with stored data.

        The request-response pairs are stored as tuples in a member list named
        recordings.

        The MockHttpClient can be switched from replay mode to record mode by
        setting the real_client member to an instance of an HttpClient which will
        make real HTTP requests and store the server's response in list of
        recordings.

        Args:
          headers: dict containing HTTP headers which should be included in all
              HTTP requests.
          recordings: The initial recordings to be used for responses. This list
              contains tuples in the form: (MockRequest, MockResponse)
          real_client: An HttpClient which will make a real HTTP request. The
              response will be converted into a MockResponse and stored in
              recordings.
        """
        self.recordings = recordings or []
        self.real_client = real_client
        self.headers = headers or {}

    def add_response(self, response, operation, url, data=None, headers=None):
        """Adds a request-response pair to the recordings list.

        After the recording is added, future matching requests will receive the
        response.

        Args:
          response: MockResponse
          operation: str
          url: str
          data: str, Currently the data is ignored when looking for matching
              requests.
          headers: dict of strings: Currently the headers are ignored when
              looking for matching requests.
        """
        request = MockRequest(operation, url, data=data, headers=headers)
        self.recordings.append((request, response))

    def request(self, operation, url, data=None, headers=None):
        """Returns a matching MockResponse from the recordings.

        If the real_client is set, the request will be passed along and the
        server's response will be added to the recordings and also returned.

        If there is no match, a NoRecordingFound error will be raised.
        """
        if self.real_client is None:
            # Replay mode: match on operation + parsed URL only (data
            # and headers are deliberately ignored, see add_response).
            if isinstance(url, (str, unicode)):
                url = atom.url.parse_url(url)
            for recording in self.recordings:
                if recording[0].operation == operation and recording[0].url == url:
                    return recording[1]
            raise NoRecordingFound('No recodings found for %s %s' % (
                operation, url))
        else:
            # There is a real HTTP client, so make the request, and record the
            # response.
            response = self.real_client.request(operation, url, data=data,
                headers=headers)
            # TODO: copy the headers
            stored_response = MockResponse(body=response, status=response.status,
                reason=response.reason)
            self.add_response(stored_response, operation, url, data=data,
                headers=headers)
            return stored_response
| apache-2.0 |
Endika/odoo | addons/website_forum_doc/__openerp__.py | 322 | 1508 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: evaluated by the module loader to register the
# 'Documentation' website module, its dependency and its data/demo files.
{
    'name': 'Documentation',
    'category': 'Website',
    'summary': 'Forum, Documentation',
    'version': '1.0',
    'description': """
Documentation based on question and pertinent answers of Forum
""",
    'author': 'OpenERP SA',
    'depends': [
        'website_forum'
    ],
    'data': [
        'data/doc_data.xml',
        'security/ir.model.access.csv',
        'views/doc.xml',
        'views/website_doc.xml',
    ],
    'demo': [
        'data/doc_demo.xml',
    ],
    'installable': True,
}
| agpl-3.0 |
JJediny/python-social-auth | social/apps/pyramid_app/views.py | 75 | 1091 | from pyramid.view import view_config
from social.utils import module_member
from social.actions import do_auth, do_complete, do_disconnect
from social.apps.pyramid_app.utils import psa, login_required
@view_config(route_name='social.auth', request_method='GET')
@psa('social.complete')
def auth(request):
return do_auth(request.backend, redirect_name='next')
@view_config(route_name='social.complete', request_method=('GET', 'POST'))
@psa('social.complete')
def complete(request, *args, **kwargs):
do_login = module_member(request.backend.setting('LOGIN_FUNCTION'))
return do_complete(request.backend, do_login, request.user,
redirect_name='next', *args, **kwargs)
@view_config(route_name='social.disconnect', request_method=('POST',))
@view_config(route_name='social.disconnect_association',
request_method=('POST',))
@psa()
@login_required
def disconnect(request):
return do_disconnect(request.backend, request.user,
request.matchdict.get('association_id'),
redirect_name='next')
| bsd-3-clause |
envoyproxy/envoy | tools/code_format/header_order.py | 1 | 4254 | #!/usr/bin/env python3
# Enforce header order in a given file. This will only reorder in the first sequence of contiguous
# #include statements, so it will not play well with #ifdef.
#
# This attempts to enforce the guidelines at
# https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes
# with some allowances for Envoy-specific idioms.
#
# There is considerable overlap with what this does and clang-format's IncludeCategories (see
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html). But, clang-format doesn't seem smart
# enough to handle block splitting and correctly detecting the main header subject to the Envoy
# canonical paths.
import argparse
import common
import pathlib
import re
import sys
def reorder_headers(path, include_order=None):
    """Return the contents of *path* with its leading #include run reordered.

    Only the first contiguous sequence of #include statements is reordered,
    so this does not play well with #ifdef-guarded includes. Includes are
    grouped into blocks (file's own header, <*.h>, <*>, then one block per
    project include directory), each block sorted alphabetically.

    Args:
      path: path of the C/C++ source or header file to process.
      include_order: ordered list of project include directories used to
          group quoted includes. Defaults to the module-level
          ``include_dir_order`` configured by the command-line entry point,
          preserving the original call signature and behavior.

    Returns:
      The reordered source as a single string.
    """
    if include_order is None:
        # Backward-compatible fallback: the CLI entry point assigns the
        # module-level `include_dir_order` before calling this function.
        include_order = include_dir_order

    source = pathlib.Path(path).read_text(encoding='utf-8')
    all_lines = iter(source.split('\n'))
    before_includes_lines = []
    includes_lines = []
    after_includes_lines = []

    # Collect all the lines prior to the first #include in before_includes_lines.
    try:
        while True:
            line = next(all_lines)
            if line.startswith('#include'):
                includes_lines.append(line)
                break
            before_includes_lines.append(line)
    except StopIteration:
        pass

    # Collect all the #include and whitespace lines in includes_lines.
    try:
        while True:
            line = next(all_lines)
            if not line:
                continue
            if not line.startswith('#include'):
                after_includes_lines.append(line)
                break
            includes_lines.append(line)
    except StopIteration:
        pass

    # Collect the remaining lines in after_includes_lines.
    after_includes_lines += list(all_lines)

    # Filter that matches the #include of the header file associated with the
    # source file being processed. E.g. if 'path' is source/common/common/hex.cc,
    # this filter matches "source/common/common/hex.h".
    def file_header_filter():
        return lambda f: f.endswith('.h"') and path.endswith(f[1:-3] + '.cc')

    def regex_filter(regex):
        return lambda f: re.match(regex, f)

    # Filters that define the #include blocks, in output order.
    block_filters = [
        file_header_filter(),
        regex_filter(r'<.*\.h>'),
        regex_filter(r'<.*>'),
    ]
    for subdir in include_order:
        block_filters.append(regex_filter('"' + subdir + '/.*"'))

    blocks = []
    already_included = set()
    for block_filter in block_filters:
        block = []
        for line in includes_lines:
            header = line[len('#include '):]
            if line not in already_included and block_filter(header):
                block.append(line)
                already_included.add(line)
        if len(block) > 0:
            blocks.append(block)

    # Anything not covered by block_filters gets its own block.
    misc_headers = list(set(includes_lines).difference(already_included))
    if len(misc_headers) > 0:
        blocks.append(misc_headers)

    reordered_includes_lines = '\n\n'.join(['\n'.join(sorted(block)) for block in blocks])

    if reordered_includes_lines:
        reordered_includes_lines += '\n'

    return '\n'.join(
        filter(
            lambda x: x, [
                '\n'.join(before_includes_lines),
                reordered_includes_lines,
                '\n'.join(after_includes_lines),
            ]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Header reordering.')
    parser.add_argument('--path', type=str, help='specify the path to the header file')
    parser.add_argument('--rewrite', action='store_true', help='rewrite header file in-place')
    parser.add_argument(
        '--include_dir_order',
        type=str,
        default=','.join(common.include_dir_order()),
        help='specify the header block include directory order')
    args = parser.parse_args()
    target_path = args.path
    # reorder_headers() reads this module-level list to build its block filters.
    include_dir_order = args.include_dir_order.split(',')
    reorderd_source = reorder_headers(target_path)
    if args.rewrite:
        pathlib.Path(target_path).write_text(reorderd_source, encoding='utf-8')
    else:
        # Write raw bytes to stdout to avoid locale-dependent encoding issues.
        sys.stdout.buffer.write(reorderd_source.encode('utf-8'))
| apache-2.0 |
2013Commons/hue | desktop/core/ext-py/python-openid-2.2.5/openid/urinorm.py | 159 | 5230 | import re
# from appendix B of rfc 3986 (http://www.ietf.org/rfc/rfc3986.txt)
# Captures: group 2 = scheme, 4 = authority, 5 = path, 6 = ?query, 8 = fragment.
uri_pattern = r'^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?'
uri_re = re.compile(uri_pattern)

# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
#
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
#
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
uri_illegal_char_re = re.compile(
    "[^-A-Za-z0-9:/?#[\]@!$&'()*+,;=._~%]", re.UNICODE)

# Splits an authority into userinfo (with trailing '@'), host, and ':port'.
authority_pattern = r'^([^@]*@)?([^:]*)(:.*)?'
authority_re = re.compile(authority_pattern)

# Matches a single percent-escape such as '%2F'.
pct_encoded_pattern = r'%([0-9A-Fa-f]{2})'
pct_encoded_re = re.compile(pct_encoded_pattern)

# Probe for a "narrow" Python 2 build: narrow builds cannot represent code
# points above 0xFFFF with unichr(), so the escape ranges are truncated.
try:
    unichr(0x10000)
except ValueError:
    # narrow python build
    UCSCHAR = [
        (0xA0, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFEF),
    ]

    IPRIVATE = [
        (0xE000, 0xF8FF),
    ]
else:
    UCSCHAR = [
        (0xA0, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFEF),
        (0x10000, 0x1FFFD),
        (0x20000, 0x2FFFD),
        (0x30000, 0x3FFFD),
        (0x40000, 0x4FFFD),
        (0x50000, 0x5FFFD),
        (0x60000, 0x6FFFD),
        (0x70000, 0x7FFFD),
        (0x80000, 0x8FFFD),
        (0x90000, 0x9FFFD),
        (0xA0000, 0xAFFFD),
        (0xB0000, 0xBFFFD),
        (0xC0000, 0xCFFFD),
        (0xD0000, 0xDFFFD),
        (0xE1000, 0xEFFFD),
    ]

    IPRIVATE = [
        (0xE000, 0xF8FF),
        (0xF0000, 0xFFFFD),
        (0x100000, 0x10FFFD),
    ]

# Lookup table: _unreserved[byte] is True for RFC 3986 "unreserved" chars
# (ALPHA / DIGIT / "-" / "." / "_" / "~").
_unreserved = [False] * 256
for _ in range(ord('A'), ord('Z') + 1): _unreserved[_] = True
for _ in range(ord('0'), ord('9') + 1): _unreserved[_] = True
for _ in range(ord('a'), ord('z') + 1): _unreserved[_] = True
_unreserved[ord('-')] = True
_unreserved[ord('.')] = True
_unreserved[ord('_')] = True
_unreserved[ord('~')] = True

# Matches any character in the UCSCHAR/IPRIVATE ranges that must be
# percent-escaped. NOTE: the tuple-unpacking lambda is Python 2-only syntax.
_escapeme_re = re.compile('[%s]' % (''.join(
    map(lambda (m, n): u'%s-%s' % (unichr(m), unichr(n)),
        UCSCHAR + IPRIVATE)),))
def _pct_escape_unicode(char_match):
    # Percent-encode a single matched unicode character as its UTF-8 octets.
    # NOTE: iterating over the encoded value yields one-character strings on
    # Python 2; this function is Python 2 only.
    c = char_match.group()
    return ''.join(['%%%X' % (ord(octet),) for octet in c.encode('utf-8')])
def _pct_encoded_replace_unreserved(mo):
    """Normalize one %XX escape: decode it when it denotes an unreserved
    character, otherwise upper-case the hex digits. Malformed escapes are
    returned untouched."""
    try:
        code = int(mo.group(1), 16)
        if _unreserved[code]:
            return chr(code)
        return mo.group().upper()
    except ValueError:
        return mo.group()
def _pct_encoded_replace(mo):
try:
return chr(int(mo.group(1), 16))
except ValueError:
return mo.group()
def remove_dot_segments(path):
    """Collapse '.' and '..' segments in *path* (RFC 3986 section 5.2.4)."""
    output = []
    while path:
        if path.startswith('../'):
            path = path[3:]
        elif path.startswith('./'):
            path = path[2:]
        elif path.startswith('/./'):
            path = path[2:]
        elif path == '/.':
            path = '/'
        elif path.startswith('/../'):
            # '..' removes the segment most recently moved to the output.
            path = path[3:]
            if output:
                output.pop()
        elif path == '/..':
            path = '/'
            if output:
                output.pop()
        elif path in ('.', '..'):
            path = ''
        else:
            # Move the first path segment (including its leading '/', if
            # any) from the input buffer to the output buffer.
            start = 1 if path.startswith('/') else 0
            cut = path.find('/', start)
            if cut == -1:
                cut = len(path)
            output.append(path[:cut])
            path = path[cut:]
    return ''.join(output)
def urinorm(uri):
    """Normalize *uri* per RFC 3986: scheme/host case, percent-encoding,
    default ports and dot segments.

    Only absolute http/https URIs are accepted; ValueError is raised
    otherwise. NOTE: Python 2 only (relies on the `unicode` builtin and
    str->unicode conversion for IDNA encoding).
    """
    if isinstance(uri, unicode):
        # Percent-escape the non-ASCII ranges first so the remaining
        # normalization can operate on an ASCII byte string.
        uri = _escapeme_re.sub(_pct_escape_unicode, uri).encode('ascii')

    illegal_mo = uri_illegal_char_re.search(uri)
    if illegal_mo:
        raise ValueError('Illegal characters in URI: %r at position %s' %
                         (illegal_mo.group(), illegal_mo.start()))

    uri_mo = uri_re.match(uri)

    scheme = uri_mo.group(2)
    if scheme is None:
        raise ValueError('No scheme specified')

    scheme = scheme.lower()
    if scheme not in ('http', 'https'):
        raise ValueError('Not an absolute HTTP or HTTPS URI: %r' % (uri,))

    authority = uri_mo.group(4)
    if authority is None:
        raise ValueError('Not an absolute URI: %r' % (uri,))

    authority_mo = authority_re.match(authority)
    if authority_mo is None:
        raise ValueError('URI does not have a valid authority: %r' % (uri,))

    userinfo, host, port = authority_mo.groups()

    if userinfo is None:
        userinfo = ''

    if '%' in host:
        # Decode percent-escapes before IDNA-encoding the host name.
        host = host.lower()
        host = pct_encoded_re.sub(_pct_encoded_replace, host)
        host = unicode(host, 'utf-8').encode('idna')
    else:
        host = host.lower()

    if port:
        # Drop empty ports and each scheme's default port.
        if (port == ':' or
            (scheme == 'http' and port == ':80') or
            (scheme == 'https' and port == ':443')):
            port = ''
    else:
        port = ''

    authority = userinfo + host + port

    path = uri_mo.group(5)
    path = pct_encoded_re.sub(_pct_encoded_replace_unreserved, path)
    path = remove_dot_segments(path)
    if not path:
        path = '/'

    query = uri_mo.group(6)
    if query is None:
        query = ''

    fragment = uri_mo.group(8)
    if fragment is None:
        fragment = ''

    return scheme + '://' + authority + path + query + fragment
| apache-2.0 |
pokowaka/googletest | test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

TEST_FILTER = 'A*.A:A*.B:C*'

# Caches of test lists, populated lazily by CalculateTestLists() so the
# test binary is invoked only once per configuration.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
    """Returns the flag that makes Google Test also run disabled tests."""
    return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
    """Returns a --gtest_filter flag carrying the given filter expression."""
    return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
    """Returns a --gtest_repeat flag for running the tests n times."""
    return '--gtest_repeat={0}'.format(n)
def ShuffleFlag():
    """Returns the flag that enables test shuffling."""
    return '--gtest_shuffle'
def RandomSeedFlag(n):
    """Returns a --gtest_random_seed flag using the given seed."""
    return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
    """Runs the test program and returns its output.

    Args:
      extra_env: dict of extra environment variables to set for the child
          process (merged over a copy of the current environment).
      args: command-line flags to pass to gtest_shuffle_test_.
    """
    environ_copy = os.environ.copy()
    environ_copy.update(extra_env)

    return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
    """Runs the test program and returns a list of test lists.

    Args:
      extra_env: a map from environment variables to their values
      args: command line flags to pass to gtest_shuffle_test_

    Returns:
      A list where the i-th element is the list of tests run in the i-th
      test iteration.
    """
    test_iterations = []
    for line in RunAndReturnOutput(extra_env, args).split('\n'):
        if line.startswith('----'):
            # A line of dashes marks the start of a new iteration's output.
            tests = []
            test_iterations.append(tests)
        elif line.strip():
            tests.append(line.strip())  # 'TestCaseName.TestName'

    return test_iterations
def GetTestCases(tests):
    """Returns the test cases named in the given full test names.

    Args:
      tests: a list of full test names in 'TestCaseName.TestName' form.

    Returns:
      A list of the unique test case names from 'tests', in order of first
      appearance. (Note: all duplicates are removed, not just consecutive
      ones as the previous docstring claimed.)
    """
    test_cases = []
    for test in tests:
        test_case = test.split('.')[0]
        if test_case not in test_cases:
            test_cases.append(test_case)

    return test_cases
def CalculateTestLists():
    """Calculates the list of tests run under different flags."""
    # Each cached list is populated only on the first call; subsequent calls
    # are no-ops, so the (slow) test binary runs once per configuration.
    if not ALL_TESTS:
        ALL_TESTS.extend(
            GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

    if not ACTIVE_TESTS:
        ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

    if not FILTERED_TESTS:
        FILTERED_TESTS.extend(
            GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

    if not SHARDED_TESTS:
        SHARDED_TESTS.extend(
            GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                      SHARD_INDEX_ENV_VAR: '1'},
                                     [])[0])

    if not SHUFFLED_ALL_TESTS:
        SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
            {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

    if not SHUFFLED_ACTIVE_TESTS:
        SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

    if not SHUFFLED_FILTERED_TESTS:
        SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

    if not SHUFFLED_SHARDED_TESTS:
        SHUFFLED_SHARDED_TESTS.extend(
            GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                      SHARD_INDEX_ENV_VAR: '1'},
                                     [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
    """Tests test shuffling."""

    def setUp(self):
        # Populate (and cache) all test lists before every test method.
        CalculateTestLists()

    def testShufflePreservesNumberOfTests(self):
        self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
        self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
        self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
        self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

    def testShuffleChangesTestOrder(self):
        self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
        self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
        self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                     SHUFFLED_FILTERED_TESTS)
        self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                     SHUFFLED_SHARDED_TESTS)

    def testShuffleChangesTestCaseOrder(self):
        self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                     GetTestCases(SHUFFLED_ALL_TESTS))
        self.assert_(
            GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
            GetTestCases(SHUFFLED_ACTIVE_TESTS))
        self.assert_(
            GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
            GetTestCases(SHUFFLED_FILTERED_TESTS))
        self.assert_(
            GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
            GetTestCases(SHUFFLED_SHARDED_TESTS))

    def testShuffleDoesNotRepeatTest(self):
        for test in SHUFFLED_ALL_TESTS:
            self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                             '%s appears more than once' % (test,))
        for test in SHUFFLED_ACTIVE_TESTS:
            self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                             '%s appears more than once' % (test,))
        for test in SHUFFLED_FILTERED_TESTS:
            self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                             '%s appears more than once' % (test,))
        for test in SHUFFLED_SHARDED_TESTS:
            self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                             '%s appears more than once' % (test,))

    def testShuffleDoesNotCreateNewTest(self):
        for test in SHUFFLED_ALL_TESTS:
            self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
        for test in SHUFFLED_ACTIVE_TESTS:
            self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
        for test in SHUFFLED_FILTERED_TESTS:
            self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
        for test in SHUFFLED_SHARDED_TESTS:
            self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

    def testShuffleIncludesAllTests(self):
        for test in ALL_TESTS:
            self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
        for test in ACTIVE_TESTS:
            self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
        for test in FILTERED_TESTS:
            self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
        for test in SHARDED_TESTS:
            self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

    def testShuffleLeavesDeathTestsAtFront(self):
        non_death_test_found = False
        for test in SHUFFLED_ACTIVE_TESTS:
            if 'DeathTest.' in test:
                self.assert_(not non_death_test_found,
                             '%s appears after a non-death test' % (test,))
            else:
                non_death_test_found = True

    def _VerifyTestCasesDoNotInterleave(self, tests):
        # Helper: asserts that each test case's tests appear contiguously.
        test_cases = []
        for test in tests:
            [test_case, _] = test.split('.')
            if test_cases and test_cases[-1] != test_case:
                test_cases.append(test_case)
                self.assertEqual(1, test_cases.count(test_case),
                                 'Test case %s is not grouped together in %s' %
                                 (test_case, tests))

    def testShuffleDoesNotInterleaveTestCases(self):
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

    def testShuffleRestoresOrderAfterEachIteration(self):
        # Get the test lists in all 3 iterations, using random seed 1, 2,
        # and 3 respectively. Google Test picks a different seed in each
        # iteration, and this test depends on the current implementation
        # picking successive numbers. This dependency is not ideal, but
        # makes the test much easier to write.
        [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
            GetTestsForAllIterations(
                {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

        # Make sure running the tests with random seed 1 gets the same
        # order as in iteration 1 above.
        [tests_with_seed1] = GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1)])
        self.assertEqual(tests_in_iteration1, tests_with_seed1)

        # Make sure running the tests with random seed 2 gets the same
        # order as in iteration 2 above. Success means that Google Test
        # correctly restores the test order before re-shuffling at the
        # beginning of iteration 2.
        [tests_with_seed2] = GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(2)])
        self.assertEqual(tests_in_iteration2, tests_with_seed2)

        # Make sure running the tests with random seed 3 gets the same
        # order as in iteration 3 above. Success means that Google Test
        # correctly restores the test order before re-shuffling at the
        # beginning of iteration 3.
        [tests_with_seed3] = GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(3)])
        self.assertEqual(tests_in_iteration3, tests_with_seed3)

    def testShuffleGeneratesNewOrderInEachIteration(self):
        [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
            GetTestsForAllIterations(
                {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

        self.assert_(tests_in_iteration1 != tests_in_iteration2,
                     tests_in_iteration1)
        self.assert_(tests_in_iteration1 != tests_in_iteration3,
                     tests_in_iteration1)
        self.assert_(tests_in_iteration2 != tests_in_iteration3,
                     tests_in_iteration2)

    def testShuffleShardedTestsPreservesPartition(self):
        # If we run M tests on N shards, the same M tests should be run in
        # total, regardless of the random seeds used by the shards.
        [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                             SHARD_INDEX_ENV_VAR: '0'},
                                            [ShuffleFlag(), RandomSeedFlag(1)])
        [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                             SHARD_INDEX_ENV_VAR: '1'},
                                            [ShuffleFlag(), RandomSeedFlag(20)])
        [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                             SHARD_INDEX_ENV_VAR: '2'},
                                            [ShuffleFlag(), RandomSeedFlag(25)])
        sorted_sharded_tests = tests1 + tests2 + tests3
        sorted_sharded_tests.sort()
        sorted_active_tests = []
        sorted_active_tests.extend(ACTIVE_TESTS)
        sorted_active_tests.sort()
        self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
    # Delegates to the shared gtest test harness entry point.
    gtest_test_utils.Main()
| bsd-3-clause |
shipci/sympy | sympy/polys/dispersion.py | 20 | 5810 | from __future__ import print_function, division
from sympy.core import S
from sympy.polys import Poly
from sympy.polys.polytools import factor_list
def dispersionset(p, q=None, *gens, **args):
    r"""Compute the *dispersion set* of two polynomials.

    For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
    and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:

    .. math::
        \operatorname{J}(f, g)
        & := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
        &  = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}

    For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.polys.dispersion import dispersion, dispersionset
    >>> from sympy.abc import x

    Dispersion set and dispersion of a simple polynomial:

    >>> fp = poly((x - 3)*(x + 3), x)
    >>> sorted(dispersionset(fp))
    [0, 6]
    >>> dispersion(fp)
    6

    Note that the definition of the dispersion is not symmetric:

    >>> fp = poly(x**4 - 3*x**2 + 1, x)
    >>> gp = fp.shift(-3)
    >>> sorted(dispersionset(fp, gp))
    [2, 3, 4]
    >>> dispersion(fp, gp)
    4
    >>> sorted(dispersionset(gp, fp))
    []
    >>> dispersion(gp, fp)
    -oo

    Computing the dispersion also works over field extensions:

    >>> from sympy import sqrt
    >>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
    >>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
    >>> sorted(dispersionset(fp, gp))
    [2]
    >>> sorted(dispersionset(gp, fp))
    [1, 4]

    We can even perform the computations for polynomials
    having symbolic coefficients:

    >>> from sympy.abc import a
    >>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
    >>> sorted(dispersionset(fp))
    [0, 1]

    See Also
    ========

    dispersion

    References
    ==========

    1. [ManWright94]_
    2. [Koepf98]_
    3. [Abramov71]_
    4. [Man93]_
    """
    # Check for valid input: with a single argument compute J(p) = J(p, p).
    same = q is None
    if same:
        q = p

    p = Poly(p, *gens, **args)
    q = Poly(q, *gens, **args)

    if not p.is_univariate or not q.is_univariate:
        raise ValueError("Polynomials need to be univariate")

    # The generator must be shared so shifts are comparable.
    if not p.gen == q.gen:
        raise ValueError("Polynomials must have the same generator")
    gen = p.gen

    # We define the dispersion of constant polynomials to be zero
    if p.degree() < 1 or q.degree() < 1:
        return {0}

    # Factor p and q over the rationals
    fp = p.factor_list()
    fq = q.factor_list() if not same else fp

    # Iterate over all pairs of factors
    J = set()
    for s, _ in fp[1]:
        for t, _ in fq[1]:
            m = s.degree()
            n = t.degree()
            # Only equal-degree factor pairs can be shifts of one another.
            if n != m:
                continue
            an = s.LC()
            bn = t.LC()
            if not (an - bn).is_zero:
                continue
            # Note that the roles of `s` and `t` below are switched
            # w.r.t. the original paper. This is for consistency
            # with the description in the book of W. Koepf.
            anm1 = s.coeff_monomial(gen**(m-1))
            bnm1 = t.coeff_monomial(gen**(n-1))
            # Candidate shift derived from the two leading coefficients.
            alpha = (anm1 - bnm1) / S(n*bn)
            if not alpha.is_integer:
                continue
            if alpha < 0 or alpha in J:
                continue
            # Verify the candidate: s(x) must equal t(x + alpha).
            if n > 1 and not (s - t.shift(alpha)).is_zero:
                continue
            J.add(alpha)

    return J
def dispersion(p, q=None, *gens, **args):
    r"""Compute the *dispersion* of polynomials.

    For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
    and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:

    .. math::
        \operatorname{dis}(f, g)
        & := \max\{ J(f,g) \cup \{0\} \} \\
        &  = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}

    and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
    Note that we make the definition `\max\{\} := -\infty`.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.polys.dispersion import dispersion, dispersionset
    >>> from sympy.abc import x

    Dispersion set and dispersion of a simple polynomial:

    >>> fp = poly((x - 3)*(x + 3), x)
    >>> sorted(dispersionset(fp))
    [0, 6]
    >>> dispersion(fp)
    6

    Note that the definition of the dispersion is not symmetric:

    >>> fp = poly(x**4 - 3*x**2 + 1, x)
    >>> gp = fp.shift(-3)
    >>> sorted(dispersionset(fp, gp))
    [2, 3, 4]
    >>> dispersion(fp, gp)
    4
    >>> sorted(dispersionset(gp, fp))
    []
    >>> dispersion(gp, fp)
    -oo

    The maximum of an empty set is defined to be `-\infty`
    as seen in this example.

    Computing the dispersion also works over field extensions:

    >>> from sympy import sqrt
    >>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
    >>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
    >>> sorted(dispersionset(fp, gp))
    [2]
    >>> sorted(dispersionset(gp, fp))
    [1, 4]

    We can even perform the computations for polynomials
    having symbolic coefficients:

    >>> from sympy.abc import a
    >>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
    >>> sorted(dispersionset(fp))
    [0, 1]

    See Also
    ========

    dispersionset

    References
    ==========

    1. [ManWright94]_
    2. [Koepf98]_
    3. [Abramov71]_
    4. [Man93]_
    """
    J = dispersionset(p, q, *gens, **args)
    if not J:
        # Definition for maximum of empty set: dis(f, g) = -oo.
        return S.NegativeInfinity
    return max(J)
| bsd-3-clause |
cfg2015/EPT-2015-2 | addons/board/__init__.py | 439 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
joopert/home-assistant | homeassistant/components/wink/sensor.py | 5 | 3228 | """Support for Wink sensors."""
import logging
import pywink
from homeassistant.const import TEMP_CELSIUS
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)

# Wink capabilities that this platform exposes as sensor entities.
SENSOR_TYPES = ["temperature", "humidity", "balance", "proximity"]
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink platform."""
    # Devices already claimed elsewhere are tracked in
    # hass.data[DOMAIN]["unique_ids"] and skipped here.
    for sensor in pywink.get_sensors():
        _id = sensor.object_id() + sensor.name()
        if _id not in hass.data[DOMAIN]["unique_ids"]:
            # Only capabilities we can represent become sensor entities.
            if sensor.capability() in SENSOR_TYPES:
                add_entities([WinkSensorDevice(sensor, hass)])

    for eggtray in pywink.get_eggtrays():
        _id = eggtray.object_id() + eggtray.name()
        if _id not in hass.data[DOMAIN]["unique_ids"]:
            add_entities([WinkSensorDevice(eggtray, hass)])

    for tank in pywink.get_propane_tanks():
        _id = tank.object_id() + tank.name()
        if _id not in hass.data[DOMAIN]["unique_ids"]:
            add_entities([WinkSensorDevice(tank, hass)])

    for piggy_bank in pywink.get_piggy_banks():
        _id = piggy_bank.object_id() + piggy_bank.name()
        if _id not in hass.data[DOMAIN]["unique_ids"]:
            try:
                # Some devices do not expose capability(); skip those.
                if piggy_bank.capability() in SENSOR_TYPES:
                    add_entities([WinkSensorDevice(piggy_bank, hass)])
            except AttributeError:
                _LOGGER.info("Device is not a sensor")
class WinkSensorDevice(WinkDevice):
    """Representation of a Wink sensor."""

    def __init__(self, wink, hass):
        """Initialize the Wink device."""
        super().__init__(wink, hass)
        self.capability = self.wink.capability()
        # A bare degree sign is mapped to Celsius; every other unit string
        # from pywink is passed through unchanged.
        if self.wink.unit() == "°":
            self._unit_of_measurement = TEMP_CELSIUS
        else:
            self._unit_of_measurement = self.wink.unit()

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self.hass.data[DOMAIN]["entities"]["sensor"].append(self)

    @property
    def state(self):
        """Return the state, rounded/scaled per capability."""
        state = None
        if self.capability == "humidity":
            if self.wink.state() is not None:
                state = round(self.wink.state())
        elif self.capability == "temperature":
            if self.wink.state() is not None:
                state = round(self.wink.state(), 1)
        elif self.capability == "balance":
            if self.wink.state() is not None:
                # Divided by 100 — presumably cents to dollars; TODO confirm.
                state = round(self.wink.state() / 100, 2)
        elif self.capability == "proximity":
            if self.wink.state() is not None:
                state = self.wink.state()
        else:
            state = self.wink.state()
        return state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        super_attrs = super().device_state_attributes
        try:
            super_attrs["egg_times"] = self.wink.eggs()
        except AttributeError:
            # Ignore error, this sensor isn't an eggminder
            pass
        return super_attrs
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.