| repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666 chars-1M) | license (15 classes) |
|---|---|---|---|---|---|
jart/tensorflow | tensorflow/python/kernel_tests/softsign_op_test.py | 91 | 2775 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softsign and SoftsignGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftsignTest(test.TestCase):
def _npSoftsign(self, np_features):
return np_features / (1 + np.abs(np_features))
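  # Softsign maps all of R into (-1, 1) but saturates far more slowly than
  # tanh: softsign(9) = 9/10 = 0.9, while np.tanh(9) is already ~0.99999997.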
def _testSoftsign(self, np_features, use_gpu=False):
np_softsign = self._npSoftsign(np_features)
with self.test_session(use_gpu=use_gpu):
softsign = nn_ops.softsign(np_features)
tf_softsign = softsign.eval()
self.assertAllClose(np_softsign, tf_softsign)
self.assertShapeEqual(np_softsign, softsign)
def testNumbers(self):
    for t in [np.float32, np.float64]:
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
def testGradient(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softsign(x, name="softsign")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softsign (float) gradient err = ", err)
self.assertLess(err, 1e-4)
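  # The analytic derivative being verified is d/dx softsign(x) = 1/(1+|x|)**2;
  # gradient_checker compares it against a numeric finite-difference estimate.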
def testWarnInts(self):
# NOTE(irving): Actually I don't know how to intercept the warning, but
    # let's make sure it runs. I promise I've looked, and there was a warning.
with self.test_session():
nn_ops.softsign(constant_op.constant(7)).eval()
if __name__ == "__main__":
test.main()
| apache-2.0 |
mdrumond/tensorflow | tensorflow/python/kernel_tests/decode_csv_op_test.py | 7 | 6114 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeCSV op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCSVOpTest(test.TestCase):
def _test(self, args, expected_out=None, expected_err_re=None):
with self.test_session() as sess:
decode = parsing_ops.decode_csv(**args)
if expected_err_re is None:
out = sess.run(decode)
for i, field in enumerate(out):
if field.dtype == np.float32:
self.assertAllClose(field, expected_out[i])
else:
self.assertAllEqual(field, expected_out[i])
else:
with self.assertRaisesOpError(expected_err_re):
sess.run(decode)
def testSimple(self):
args = {
"records": ["1", "2", '"3"'],
"record_defaults": [[1]],
}
expected_out = [[1, 2, 3]]
self._test(args, expected_out)
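  # decode_csv emits one tensor per CSV column, not per record: the three
  # single-column records above therefore come back as one length-3 tensor.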
def testSimpleNoQuoteDelimiter(self):
args = {
"records": ["1", "2", '"3"'],
"record_defaults": [[""]],
"use_quote_delim": False,
}
expected_out = [[b"1", b"2", b'"3"']]
self._test(args, expected_out)
def testScalar(self):
args = {"records": '1,""', "record_defaults": [[3], [4]]}
expected_out = [1, 4]
self._test(args, expected_out)
def test2D(self):
args = {"records": [["1", "2"], ['""', "4"]], "record_defaults": [[5]]}
expected_out = [[[1, 2], [5, 4]]]
self._test(args, expected_out)
def test2DNoQuoteDelimiter(self):
args = {"records": [["1", "2"], ['""', '"']],
"record_defaults": [[""]],
"use_quote_delim": False}
expected_out = [[[b"1", b"2"], [b'""', b'"']]]
self._test(args, expected_out)
def testInt64(self):
args = {
"records": ["1", "2", '"2147483648"'],
"record_defaults": [np.array(
[], dtype=np.int64)],
}
expected_out = [[1, 2, 2147483648]]
self._test(args, expected_out)
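  # Supplying an empty np.int64 array as the column default selects int64
  # output, which is why 2147483648 (one past the int32 maximum) parses
  # without overflow in the test above.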
def testComplexString(self):
args = {
"records": ['"1.0"', '"ab , c"', '"a\nbc"', '"ab""c"', " abc "],
"record_defaults": [["1"]]
}
expected_out = [[b"1.0", b"ab , c", b"a\nbc", b'ab"c', b" abc "]]
self._test(args, expected_out)
def testMultiRecords(self):
args = {
"records": ["1.0,4,aa", "0.2,5,bb", "3,6,cc"],
"record_defaults": [[1.0], [1], ["aa"]]
}
expected_out = [[1.0, 0.2, 3], [4, 5, 6], [b"aa", b"bb", b"cc"]]
self._test(args, expected_out)
def testNA(self):
args = {
"records": ["2.0,NA,aa", "NA,5,bb", "3,6,NA"],
"record_defaults": [[0.0], [0], [""]],
"na_value": "NA"
}
expected_out = [[2.0, 0.0, 3], [0, 5, 6], [b"aa", b"bb", b""]]
self._test(args, expected_out)
def testWithDefaults(self):
args = {
"records": [",1,", "0.2,3,bcd", "3.0,,"],
"record_defaults": [[1.0], [0], ["a"]]
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"a"]]
self._test(args, expected_out)
def testWithDefaultsAndNoQuoteDelimiter(self):
args = {
"records": [",1,", "0.2,3,bcd", '3.0,,"'],
"record_defaults": [[1.0], [0], ["a"]],
"use_quote_delim": False,
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], [b"a", b"bcd", b"\""]]
self._test(args, expected_out)
def testWithTabDelim(self):
args = {
"records": ["1\t1", "0.2\t3", "3.0\t"],
"record_defaults": [[1.0], [0]],
"field_delim": "\t"
}
expected_out = [[1.0, 0.2, 3.0], [1, 3, 0]]
self._test(args, expected_out)
def testWithoutDefaultsError(self):
args = {
"records": [",1", "0.2,3", "3.0,"],
"record_defaults": [[1.0], np.array(
[], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 1 is required but missing in record 2!")
def testWrongFieldIntError(self):
args = {
"records": [",1", "0.2,234a", "3.0,2"],
"record_defaults": [[1.0], np.array(
[], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
def testOutOfRangeError(self):
args = {
"records": ["1", "9999999999999999999999999", "3"],
"record_defaults": [[1]]
}
self._test(
args, expected_err_re="Field 0 in record 1 is not a valid int32: ")
def testWrongFieldFloatError(self):
args = {
"records": [",1", "0.2,2", "3.0adf,3"],
"record_defaults": [[1.0], np.array(
[], dtype=np.int32)]
}
self._test(
args, expected_err_re="Field 0 in record 2 is not a valid float: ")
def testWrongFieldStringError(self):
args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}
self._test(
args, expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
def testWrongDefaults(self):
args = {"records": [",1", "0.2,2", "3.0adf,3"], "record_defaults": [[1.0]]}
self._test(args, expected_err_re="Expect 1 fields but have 2 in record 0")
def testShortQuotedString(self):
args = {
"records": ["\""],
"record_defaults": [["default"]],
}
self._test(
args, expected_err_re="Quoted field has to end with quote followed.*")
if __name__ == "__main__":
test.main()
| apache-2.0 |
timothsp/where2ate | venv/lib/python3.3/site-packages/pip/pep425tags.py | 249 | 4427 | """Generate and work with PEP 425 Compatibility Tags."""
from __future__ import absolute_import
import re
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
return ''.join(map(str, sys.version_info[:2]))
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, noarch=False):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
major = sys.version_info[0]
# Support all previous minor Python versions.
for minor in range(sys.version_info[1], -1, -1):
versions.append(''.join(map(str, (major, minor))))
impl = get_abbr_impl()
abis = []
try:
soabi = sysconfig.get_config_var('SOABI')
except IOError as e: # Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
soabi = None
if soabi and soabi.startswith('cpython-'):
abis[0:0] = ['cp' + soabi.split('-')[1]]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
if not noarch:
arch = get_platform()
if sys.platform == 'darwin':
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
actual_arches = [actual_arch]
if actual_arch in ('i386', 'ppc'):
actual_arches.append('fat')
if actual_arch in ('i386', 'x86_64'):
actual_arches.append('intel')
if actual_arch in ('i386', 'ppc', 'x86_64'):
actual_arches.append('fat3')
if actual_arch in ('ppc64', 'x86_64'):
actual_arches.append('fat64')
if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
actual_arches.append('universal')
tpl = '{0}_{1}_%i_%s'.format(name, major)
arches = []
for m in range(int(minor) + 1):
for a in actual_arches:
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
else:
arches = [arch]
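        # For a 'macosx_10_9_x86_64' build, for instance, the block above emits
        # a tag for every OS X minor release down to 10.0 crossed with the
        # 'x86_64', 'intel', 'fat3', 'fat64' and 'universal' variants, so
        # older fat-binary wheels remain installable.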
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# Has binaries, does not use the Python API:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
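# Illustrative sketch only (actual values depend on the interpreter and
# platform): on a hypothetical CPython 3.3 Linux build the list starts with
#
#   >>> get_supported(['33'])[:2]
#   [('cp33', 'cp33m', 'linux_x86_64'), ('cp33', 'abi3', 'linux_x86_64')]
#
# i.e. (implementation+version, ABI, platform) triples in preference order.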
| cc0-1.0 |
JCB-K/xhtml2pdf | test/story2canvas.py | 155 | 1317 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "$Revision: 194 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-04-18 18:59:53 +0200 (Fr, 18 Apr 2008) $"
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.platypus import Frame
import ho.pisa as pisa
def test(filename):
# Convert HTML to "Reportlab Story" structure
story = pisa.pisaStory("""
<h1>Sample</h1>
<p>Hello <b>World</b>!</p>
""" * 20).story
# Draw to Canvas
c = Canvas(filename)
f = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
f.addFromList(story,c)
c.save()
# Show PDF
pisa.startViewer(filename)
if __name__=="__main__":
test('story2canvas.pdf')
| apache-2.0 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/distutils/command/install.py | 88 | 26260 | """distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
if sys.version < "2.2":
WINDOWS_SCHEME = {
'purelib': '$base',
'platlib': '$base',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
else:
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/include/python$py_version_short/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
'nt': WINDOWS_SCHEME,
'nt_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/Scripts',
'data' : '$userbase',
},
'os2': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'os2_home': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/include/python$py_version_short/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
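# Under the 'unix_prefix' scheme with prefix=/usr/local on Python 2.7, for
# example, the 'purelib' template above expands through subst_vars() to
# /usr/local/lib/python2.7/site-packages.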
class install (Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
('user', None,
"install in user site-package '%s'" % USER_SITE),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build', 'user']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
self.user = 0
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_userbase = USER_BASE
self.install_usersite = USER_SITE
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
# distribution. Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with prefix, "
"exec_prefix/home, or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'userbase': self.install_userbase,
'usersite': self.install_usersite,
}
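        # e.g. for a sys.version starting '2.7.5', py_version_short is '2.7'
        # and py_version_nodot is '27'; these feed the $-substitutions in the
        # INSTALL_SCHEMES templates above.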
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
def finalize_unix (self):
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError, \
("install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme("unix_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError, \
"must not supply exec-prefix without prefix"
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
# finalize_unix ()
def finalize_other (self): # Windows and Mac OS for now
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme(os.name + "_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError, \
"I don't know how to install stuff on '%s'" % os.name
# finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs (self):
self._expand_attrs(['install_base',
'install_platbase',
'root'])
def expand_dirs (self):
self._expand_attrs(['install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path (self):
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if type(self.extra_path) is StringType:
self.extra_path = string.split(self.extra_path, ',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
(path_file, extra_dirs) = self.extra_path
else:
raise DistutilsOptionError, \
("'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
# handle_extra_path ()
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
def create_home_path(self):
"""Create directories under ~
"""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.iteritems():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0700)
# -- Command execution methods -------------------------------------
def run (self):
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# If we built for any other platform, we can't install.
build_plat = self.distribution.get_command_obj('build').plat_name
# check warn_dir - it is a clue that the 'install' is happening
# internally, and not to sys.path, so we don't check the platform
# matches what we are running.
if self.warn_dir and build_plat != get_platform():
raise DistutilsPlatformError("Can't install when "
"cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
# run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib (self):
"""Return true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers (self):
return self.distribution.has_headers()
def has_scripts (self):
return self.distribution.has_scripts()
def has_data (self):
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
# class install
| bsd-2-clause |
wrapp/AutobahnPython | examples/websocket/echo/client_reconnecting.py | 15 | 2634 | ###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python import log
from autobahn.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("Hello, world!")
def onOpen(self):
self.sendHello()
def onMessage(self, msg, binary):
print "Got echo: " + msg
reactor.callLater(1, self.sendHello)
class EchoClientFactory(ReconnectingClientFactory, WebSocketClientFactory):
protocol = EchoClientProtocol
## http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ReconnectingClientFactory.html
##
maxDelay = 10
maxRetries = 5
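   ## With these settings the factory gives up after 5 attempts and backs off
   ## exponentially between tries (delay multiplied by ~e per attempt, a
   ## Twisted default), but never waits more than 10 seconds.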
def startedConnecting(self, connector):
print 'Started to connect.'
def clientConnectionLost(self, connector, reason):
print 'Lost connection. Reason:', reason
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
print 'Connection failed. Reason:', reason
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Need the WebSocket server address, i.e. ws://localhost:9000"
sys.exit(1)
if len(sys.argv) > 2 and sys.argv[2] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = EchoClientFactory(sys.argv[1],
debug = debug,
debugCodePaths = debug)
# uncomment to use Hixie-76 protocol
#factory.setProtocolOptions(allowHixie76 = True, version = 0)
connectWS(factory)
reactor.run()
| apache-2.0 |
mscuthbert/abjad | abjad/tools/indicatortools/StringNumber.py | 2 | 3561 | # -*- encoding: utf-8 -*-
import collections
from abjad.tools.abctools import AbjadValueObject
class StringNumber(AbjadValueObject):
r'''String number indicator.
.. container:: example
**Example 1.** String I:
::
>>> indicator = indicatortools.StringNumber(1)
>>> print(format(indicator))
indicatortools.StringNumber(
numbers=(1,),
)
.. container:: example
**Example 2.** Strings II and III:
::
>>> indicator = indicatortools.StringNumber((2, 3))
>>> print(format(indicator))
indicatortools.StringNumber(
numbers=(2, 3),
)
'''
### CLASS VARIABLES
__slots__ = (
'_default_scope',
'_numbers',
)
### INITIALIZER ###
def __init__(
self,
numbers=None,
):
self._default_scope = None
numbers = numbers or ()
if isinstance(numbers, type(self)):
numbers = numbers.numbers
elif not isinstance(numbers, collections.Sequence):
numbers = (numbers,)
numbers = tuple(int(x) for x in numbers)
assert all(0 < x < 7 for x in numbers)
self._numbers = tuple(numbers)
### PUBLIC PROPERTIES ###
@property
def default_scope(self):
r'''Gets default scope of string number indicator.
.. container:: example
**Example 1.** String I:
::
>>> indicator = indicatortools.StringNumber(1)
>>> indicator.default_scope is None
True
.. container:: example
**Example 2.** Strings II and III:
::
>>> indicator = indicatortools.StringNumber((2, 3))
>>> indicator.default_scope is None
True
Returns none.
'''
return self._default_scope
@property
def numbers(self):
r'''Gets numbers of string number indicator:
.. container:: example
**Example 1.** String I:
::
>>> indicator = indicatortools.StringNumber(1)
>>> indicator.numbers
(1,)
.. container:: example
**Example 2.** Strings II and III:
::
>>> indicator = indicatortools.StringNumber((2, 3))
>>> indicator.numbers
(2, 3)
Set to tuple of zero or more positive integers.
Defaults to empty tuple.
Returns tuple of zero or more positive integers.
'''
return self._numbers
@property
def roman_numerals(self):
r'''Gets roman numerals of string number indicator.
.. container:: example
**Example 1.** String I:
::
>>> indicator = indicatortools.StringNumber(1)
>>> indicator.roman_numerals
('i',)
.. container:: example
**Example 2.** Strings II and III:
::
>>> indicator = indicatortools.StringNumber((2, 3))
>>> indicator.roman_numerals
('ii', 'iii')
Returns tuple of zero or more strings.
'''
numerals = ('i', 'ii', 'iii', 'iv', 'v', 'vi')
result = []
for x in self.numbers:
numeral = numerals[x - 1]
result.append(numeral)
        return tuple(result)
| gpl-3.0 |
kmonsoor/python-for-android | python3-alpha/python3-src/Lib/encodings/cp1253.py | 272 | 13094 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1253',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\ufffe' # 0xAA -> UNDEFINED
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u2015' # 0xAF -> HORIZONTAL BAR
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u0384' # 0xB4 -> GREEK TONOS
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
'\ufffe' # 0xD2 -> UNDEFINED
'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
'\u03bd' # 0xED -> GREEK SMALL LETTER NU
'\u03be' # 0xEE -> GREEK SMALL LETTER XI
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
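### Usage sketch (not part of the generated file): once registered under the
### name 'cp1253', the tables above round-trip Greek text, e.g.
###
###   >>> 'αβγ'.encode('cp1253')
###   b'\xe1\xe2\xe3'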
| apache-2.0 |
jkenn99/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/metastep.py | 145 | 2398 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
# FIXME: Unify with StepSequence? I'm not sure yet which is the better design.
class MetaStep(AbstractStep):
substeps = [] # Override in subclasses
def __init__(self, tool, options):
AbstractStep.__init__(self, tool, options)
self._step_instances = []
for step_class in self.substeps:
self._step_instances.append(step_class(tool, options))
@staticmethod
def _collect_options_from_steps(steps):
collected_options = []
for step in steps:
collected_options = collected_options + step.options()
return collected_options
@classmethod
def options(cls):
return cls._collect_options_from_steps(cls.substeps)
def run(self, state):
for step in self._step_instances:
step.run(state)
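# A MetaStep subclass just lists its substeps, e.g. (hypothetical step names):
#
#   class PrepareAndApply(MetaStep):
#       substeps = [PrepareChangeLog, ApplyPatch]
#
# options() then aggregates the substeps' options and run() executes them in
# declaration order against the shared state dict.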
| bsd-3-clause |
arokem/scipy | scipy/io/harwell_boeing/tests/test_hb.py | 1 | 2366 | from __future__ import division, print_function, absolute_import
from io import StringIO
import tempfile
import numpy as np
from numpy.testing import assert_equal, \
assert_array_almost_equal_nulp
from scipy.sparse import coo_matrix, csc_matrix, rand
from scipy.io import hb_read, hb_write
SIMPLE = """\
No Title |No Key
9 4 1 4
RUA 100 100 10 0
(26I3) (26I3) (3E23.15)
1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
37 71 89 18 30 45 70 19 25 52
2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
6.912334991524289e-01
"""
SIMPLE_MATRIX = coo_matrix(
((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
0.0661749042483, 0.887037034319, 0.419647859016,
0.564960307211, 0.993442388709, 0.691233499152,),
(np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
[0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
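# The header of SIMPLE above follows the Harwell-Boeing layout: line 2 gives
# the card counts (9 total = 4 pointer + 1 index + 4 value lines), line 3 the
# matrix type 'RUA' (Real Unsymmetric Assembled) with shape 100x100 and 10
# nonzeros, and line 4 the Fortran formats for each section.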
def assert_csc_almost_equal(r, l):
r = csc_matrix(r)
l = csc_matrix(l)
assert_equal(r.indptr, l.indptr)
assert_equal(r.indices, l.indices)
assert_array_almost_equal_nulp(r.data, l.data, 10000)
class TestHBReader(object):
def test_simple(self):
m = hb_read(StringIO(SIMPLE))
assert_csc_almost_equal(m, SIMPLE_MATRIX)
class TestHBReadWrite(object):
def check_save_load(self, value):
with tempfile.NamedTemporaryFile(mode='w+t') as file:
hb_write(file, value)
file.file.seek(0)
value_loaded = hb_read(file)
assert_csc_almost_equal(value, value_loaded)
def test_simple(self):
random_matrix = rand(10, 100, 0.1)
for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
matrix = random_matrix.asformat(matrix_format, copy=False)
self.check_save_load(matrix)
| bsd-3-clause |
bak1an/django | tests/model_meta/tests.py | 63 | 11907 | from django.apps import apps
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, Field, related
from django.db.models.options import EMPTY_RELATION_TREE, IMMUTABLE_WARNING
from django.test import SimpleTestCase
from .models import (
AbstractPerson, BasePerson, Child, CommonAncestor, FirstParent, Person,
ProxyPerson, Relating, Relation, SecondParent,
)
from .results import TEST_RESULTS
class OptionsBaseTests(SimpleTestCase):
def _map_related_query_names(self, res):
return tuple((o.name, m) for o, m in res)
def _map_names(self, res):
return tuple((f.name, m) for f, m in res)
def _model(self, current_model, field):
model = field.model._meta.concrete_model
return None if model == current_model else model
def _details(self, current_model, relation):
direct = isinstance(relation, Field) or isinstance(relation, GenericForeignKey)
model = relation.model._meta.concrete_model
if model == current_model:
model = None
field = relation if direct else relation.field
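        # The 4-tuple below is (relation, model, direct, m2m): `model` is None
        # when the field is defined on `current_model` itself, and `direct` is
        # True for fields declared on the model rather than auto-created
        # reverse relations.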
return relation, model, direct, bool(field.many_to_many) # many_to_many can be None
class GetFieldsTests(OptionsBaseTests):
def test_get_fields_is_immutable(self):
msg = IMMUTABLE_WARNING % "get_fields()"
for _ in range(2):
# Running unit test twice to ensure both non-cached and cached result
# are immutable.
fields = Person._meta.get_fields()
with self.assertRaisesMessage(AttributeError, msg):
fields += ["errors"]
class LabelTests(OptionsBaseTests):
def test_label(self):
for model, expected_result in TEST_RESULTS['labels'].items():
self.assertEqual(model._meta.label, expected_result)
def test_label_lower(self):
for model, expected_result in TEST_RESULTS['lower_labels'].items():
self.assertEqual(model._meta.label_lower, expected_result)
class DataTests(OptionsBaseTests):
def test_fields(self):
for model, expected_result in TEST_RESULTS['fields'].items():
fields = model._meta.fields
self.assertEqual([f.attname for f in fields], expected_result)
def test_local_fields(self):
def is_data_field(f):
return isinstance(f, Field) and not f.many_to_many
for model, expected_result in TEST_RESULTS['local_fields'].items():
fields = model._meta.local_fields
self.assertEqual([f.attname for f in fields], expected_result)
for f in fields:
self.assertEqual(f.model, model)
self.assertTrue(is_data_field(f))
def test_local_concrete_fields(self):
for model, expected_result in TEST_RESULTS['local_concrete_fields'].items():
fields = model._meta.local_concrete_fields
self.assertEqual([f.attname for f in fields], expected_result)
for f in fields:
self.assertIsNotNone(f.column)
class M2MTests(OptionsBaseTests):
def test_many_to_many(self):
for model, expected_result in TEST_RESULTS['many_to_many'].items():
fields = model._meta.many_to_many
self.assertEqual([f.attname for f in fields], expected_result)
for f in fields:
self.assertTrue(f.many_to_many and f.is_relation)
def test_many_to_many_with_model(self):
for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
models = [self._model(model, field) for field in model._meta.many_to_many]
self.assertEqual(models, expected_result)
class RelatedObjectsTests(OptionsBaseTests):
def key_name(self, r):
return r[0]
def test_related_objects(self):
result_key = 'get_all_related_objects_with_model'
for model, expected in TEST_RESULTS[result_key].items():
objects = [
(field, self._model(model, field))
for field in model._meta.get_fields()
if field.auto_created and not field.concrete
]
self.assertEqual(
sorted(self._map_related_query_names(objects), key=self.key_name),
sorted(expected, key=self.key_name),
)
def test_related_objects_local(self):
result_key = 'get_all_related_objects_with_model_local'
for model, expected in TEST_RESULTS[result_key].items():
objects = [
(field, self._model(model, field))
for field in model._meta.get_fields(include_parents=False)
if field.auto_created and not field.concrete
]
self.assertEqual(
sorted(self._map_related_query_names(objects), key=self.key_name),
sorted(expected, key=self.key_name),
)
def test_related_objects_include_hidden(self):
result_key = 'get_all_related_objects_with_model_hidden'
for model, expected in TEST_RESULTS[result_key].items():
objects = [
(field, self._model(model, field))
for field in model._meta.get_fields(include_hidden=True)
if field.auto_created and not field.concrete
]
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_include_hidden_local_only(self):
result_key = 'get_all_related_objects_with_model_hidden_local'
for model, expected in TEST_RESULTS[result_key].items():
objects = [
(field, self._model(model, field))
for field in model._meta.get_fields(include_hidden=True, include_parents=False)
if field.auto_created and not field.concrete
]
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
class PrivateFieldsTests(OptionsBaseTests):
def test_private_fields(self):
for model, expected_names in TEST_RESULTS['private_fields'].items():
objects = model._meta.private_fields
self.assertEqual(sorted([f.name for f in objects]), sorted(expected_names))
class GetFieldByNameTests(OptionsBaseTests):
def test_get_data_field(self):
field_info = self._details(Person, Person._meta.get_field('data_abstract'))
self.assertEqual(field_info[1:], (BasePerson, True, False))
self.assertIsInstance(field_info[0], CharField)
def test_get_m2m_field(self):
field_info = self._details(Person, Person._meta.get_field('m2m_base'))
self.assertEqual(field_info[1:], (BasePerson, True, True))
self.assertIsInstance(field_info[0], related.ManyToManyField)
def test_get_related_object(self):
field_info = self._details(Person, Person._meta.get_field('relating_baseperson'))
self.assertEqual(field_info[1:], (BasePerson, False, False))
self.assertIsInstance(field_info[0], related.ForeignObjectRel)
def test_get_related_m2m(self):
field_info = self._details(Person, Person._meta.get_field('relating_people'))
self.assertEqual(field_info[1:], (None, False, True))
self.assertIsInstance(field_info[0], related.ForeignObjectRel)
def test_get_generic_relation(self):
field_info = self._details(Person, Person._meta.get_field('generic_relation_base'))
self.assertEqual(field_info[1:], (None, True, False))
self.assertIsInstance(field_info[0], GenericRelation)
def test_get_fields_only_searches_forward_on_apps_not_ready(self):
opts = Person._meta
# If the apps registry is not ready, get_field() searches only over
# forward fields.
opts.apps.models_ready = False
try:
# 'data_abstract' is a forward field, and therefore will be found
self.assertTrue(opts.get_field('data_abstract'))
msg = (
"Person has no field named 'relating_baseperson'. The app "
"cache isn't ready yet, so if this is an auto-created related "
"field, it won't be available yet."
)
# 'relating_baseperson' is a reverse field, and will raise an exception
with self.assertRaisesMessage(FieldDoesNotExist, msg):
opts.get_field('relating_baseperson')
finally:
opts.apps.models_ready = True
class RelationTreeTests(SimpleTestCase):
all_models = (Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating)
def setUp(self):
apps.clear_cache()
def test_clear_cache_clears_relation_tree(self):
# The apps.clear_cache() call in setUp() should have deleted all trees.
# Exclude abstract models that are not included in the Apps registry
# and have no cache.
all_models_with_cache = (m for m in self.all_models if not m._meta.abstract)
for m in all_models_with_cache:
self.assertNotIn('_relation_tree', m._meta.__dict__)
def test_first_relation_tree_access_populates_all(self):
# On first access, relation tree should have populated cache.
self.assertTrue(self.all_models[0]._meta._relation_tree)
# AbstractPerson does not have any relations, so relation_tree
# should just return an EMPTY_RELATION_TREE.
self.assertEqual(AbstractPerson._meta._relation_tree, EMPTY_RELATION_TREE)
# All the other models should already have their relation tree
# in the internal __dict__ .
all_models_but_abstractperson = (m for m in self.all_models if m is not AbstractPerson)
for m in all_models_but_abstractperson:
self.assertIn('_relation_tree', m._meta.__dict__)
def test_relations_related_objects(self):
# Testing non hidden related objects
self.assertEqual(
sorted([field.related_query_name() for field in Relation._meta._relation_tree
if not field.remote_field.field.remote_field.is_hidden()]),
sorted([
'fk_abstract_rel', 'fk_base_rel', 'fk_concrete_rel', 'fo_abstract_rel',
'fo_base_rel', 'fo_concrete_rel', 'm2m_abstract_rel',
'm2m_base_rel', 'm2m_concrete_rel'
])
)
# Testing hidden related objects
self.assertEqual(
sorted([field.related_query_name() for field in BasePerson._meta._relation_tree]),
sorted([
'+', '_relating_basepeople_hidden_+', 'BasePerson_following_abstract+',
'BasePerson_following_abstract+', 'BasePerson_following_base+', 'BasePerson_following_base+',
'BasePerson_friends_abstract+', 'BasePerson_friends_abstract+', 'BasePerson_friends_base+',
'BasePerson_friends_base+', 'BasePerson_m2m_abstract+', 'BasePerson_m2m_base+', 'Relating_basepeople+',
'Relating_basepeople_hidden+', 'followers_abstract', 'followers_base', 'friends_abstract_rel_+',
'friends_base_rel_+', 'person', 'relating_basepeople', 'relating_baseperson',
])
)
self.assertEqual([field.related_query_name() for field in AbstractPerson._meta._relation_tree], [])
class ParentListTests(SimpleTestCase):
def test_get_parent_list(self):
self.assertEqual(CommonAncestor._meta.get_parent_list(), [])
self.assertEqual(FirstParent._meta.get_parent_list(), [CommonAncestor])
self.assertEqual(SecondParent._meta.get_parent_list(), [CommonAncestor])
self.assertEqual(Child._meta.get_parent_list(), [FirstParent, SecondParent, CommonAncestor])
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_azure_firewall_fqdn_tags_operations.py | 1 | 5095 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallFqdnTagsOperations:
"""AzureFirewallFqdnTagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.AzureFirewallFqdnTagListResult"]:
"""Gets all the Azure Firewall FQDN Tags in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallFqdnTagListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.AzureFirewallFqdnTagListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallFqdnTagListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallFqdnTagListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewallFqdnTags'} # type: ignore
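# A minimal usage sketch (illustrative; the client class, credential and the
# operations-group attribute name are assumptions based on the usual generated
# SDK layout, not taken from this file):
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.v2018_11_01.aio import NetworkManagementClient
#
#   async def print_fqdn_tags(subscription_id):
#       async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#           async for tag in client.azure_firewall_fqdn_tags.list_all():
#               print(tag.fqdn_tag_name)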
| mit |
TDAbboud/micropython | stmhal/make-stmconst.py | 13 | 9654 | """
Read in the cmsis/devinc/stm32f405xx.h header, extract relevant constants,
and create modstmconst.c.
This is not part of the automatic build process because stm32f405xx.h isn't
expected to change. After generating the file, some manual intervention is
needed to copy the new qstr definitions to qstrdefsport.h.
"""
from __future__ import print_function
import argparse
import re
# Python 2/3 compatibility
import platform
if platform.python_version_tuple()[0] == '2':
def convert_bytes_to_str(b):
return b
elif platform.python_version_tuple()[0] == '3':
def convert_bytes_to_str(b):
try:
return str(b, 'utf8')
except ValueError:
# some files have invalid utf8 bytes, so filter them out
return ''.join(chr(l) for l in b if l <= 126)
# end compatibility code
# given a list of (name,regex) pairs, find the first one that matches the given line
def re_match_first(regexs, line):
for name, regex in regexs:
match = re.match(regex, line)
if match:
return name, match
return None, None
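# A minimal sketch of the intended use (hypothetical input line), against the
# Lexer.regexs table defined below:
#   name, match = re_match_first(Lexer.regexs, '#define FLASH_BASE 0x08000000')
#   # name == '#define hex' and match.groupdict() ==
#   # {'id': 'FLASH_BASE', 'hex': '0x08000000'}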
class LexerError(Exception):
def __init__(self, line):
self.line = line
class Lexer:
re_io_reg = r'__IO uint(?P<bits>8|16|32)_t +(?P<reg>[A-Z0-9]+)'
re_comment = r'(?P<comment>[A-Za-z0-9 \-/_()&]+)'
re_addr_offset = r'Address offset: (?P<offset>0x[0-9A-Z]{2,3})'
regexs = (
('#define hex', re.compile(r'#define +(?P<id>[A-Z0-9_]+) +(?:\(\(uint32_t\))?(?P<hex>0x[0-9A-F]+)U?(?:\))?($| +/\*)')),
('#define X', re.compile(r'#define +(?P<id>[A-Z0-9_]+) +(?P<id2>[A-Z0-9_]+)($| +/\*)')),
('#define X+hex', re.compile(r'#define +(?P<id>[A-Za-z0-9_]+) +\((?P<id2>[A-Z0-9_]+) \+ (?P<hex>0x[0-9A-F]+)U?\)($| +/\*)')),
('#define typedef', re.compile(r'#define +(?P<id>[A-Z0-9_]+(ext)?) +\(\([A-Za-z0-9_]+_TypeDef \*\) (?P<id2>[A-Za-z0-9_]+)\)($| +/\*)')),
('typedef struct', re.compile(r'typedef struct$')),
('{', re.compile(r'{$')),
('}', re.compile(r'}$')),
('} TypeDef', re.compile(r'} *(?P<id>[A-Z][A-Za-z0-9_]+)_(?P<global>([A-Za-z0-9_]+)?)TypeDef;$')),
('IO reg', re.compile(re_io_reg + r'; +/\*!< ' + re_comment + r', +' + re_addr_offset + r' *\*/')),
('IO reg array', re.compile(re_io_reg + r'\[(?P<array>[2-8])\]; +/\*!< ' + re_comment + r', +' + re_addr_offset + r'-(0x[0-9A-Z]{2,3}) *\*/')),
)
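# A hypothetical header line the 'IO reg' pattern above is meant to match:
#   __IO uint32_t MODER;   /*!< GPIO port mode register, Address offset: 0x00 */
# yielding bits='32', reg='MODER', comment='GPIO port mode register'
# and offset='0x00'.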
def __init__(self, filename):
self.file = open(filename, 'rb')
self.line_number = 0
def next_match(self, strictly_next=False):
while True:
line = self.file.readline()
line = convert_bytes_to_str(line)
self.line_number += 1
if len(line) == 0:
return ('EOF', None)
match = re_match_first(Lexer.regexs, line.strip())
if strictly_next or match[0] is not None:
return match
def must_match(self, kind):
match = self.next_match(strictly_next=True)
if match[0] != kind:
raise LexerError(self.line_number)
return match
def parse_file(filename):
lexer = Lexer(filename)
reg_defs = {}
consts = {}
periphs = []
while True:
m = lexer.next_match()
if m[0] == 'EOF':
break
elif m[0] == '#define hex':
d = m[1].groupdict()
consts[d['id']] = int(d['hex'], base=16)
elif m[0] == '#define X':
d = m[1].groupdict()
if d['id2'] in consts:
consts[d['id']] = consts[d['id2']]
elif m[0] == '#define X+hex':
d = m[1].groupdict()
if d['id2'] in consts:
consts[d['id']] = consts[d['id2']] + int(d['hex'], base=16)
elif m[0] == '#define typedef':
d = m[1].groupdict()
if d['id2'] in consts:
periphs.append((d['id'], consts[d['id2']]))
elif m[0] == 'typedef struct':
lexer.must_match('{')
m = lexer.next_match()
regs = []
while m[0] in ('IO reg', 'IO reg array'):
d = m[1].groupdict()
reg = d['reg']
offset = int(d['offset'], base=16)
bits = int(d['bits'])
comment = d['comment']
if m[0] == 'IO reg':
regs.append((reg, offset, bits, comment))
else:
for i in range(int(d['array'])):
regs.append((reg + str(i), offset + i * bits // 8, bits, comment))
m = lexer.next_match()
if m[0] == '}':
pass
elif m[0] == '} TypeDef':
reg_defs[m[1].groupdict()['id']] = regs
else:
raise LexerError(lexer.line_number)
return periphs, reg_defs
def print_int_obj(val, needed_mpzs):
if -0x40000000 <= val < 0x40000000:
print('MP_OBJ_NEW_SMALL_INT(%#x)' % val, end='')
else:
print('(mp_obj_t)&mpz_%08x' % val, end='')
needed_mpzs.add(val)
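# Worked example (illustrative): a peripheral base such as 0x40020000 is
# >= 0x40000000, so it is emitted as '(mp_obj_t)&mpz_40020000' and recorded
# in needed_mpzs, while a small value such as 0x18 is emitted inline as
# MP_OBJ_NEW_SMALL_INT(0x18).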
def print_periph(periph_name, periph_val, needed_qstrs, needed_mpzs):
qstr = periph_name.upper()
print('{ MP_OBJ_NEW_QSTR(MP_QSTR_%s), ' % qstr, end='')
print_int_obj(periph_val, needed_mpzs)
print(' },')
needed_qstrs.add(qstr)
def print_regs(reg_name, reg_defs, needed_qstrs, needed_mpzs):
reg_name = reg_name.upper()
for r in reg_defs:
qstr = reg_name + '_' + r[0]
print('{ MP_OBJ_NEW_QSTR(MP_QSTR_%s), ' % qstr, end='')
print_int_obj(r[1], needed_mpzs)
print(' }, // %s-bits, %s' % (r[2], r[3]))
needed_qstrs.add(qstr)
# This version of print regs groups registers together into submodules (eg GPIO submodule).
# This makes the qstrs shorter, and makes the list of constants more manageable (since
# they are not all in one big module) but it is then harder to compile the constants, and
# is more cumbersome to access.
# As such, we don't use this version.
# And for the number of constants we have, this function seems to use about the same amount
# of ROM as print_regs.
def print_regs_as_submodules(reg_name, reg_defs, modules, needed_qstrs):
mod_name_lower = reg_name.lower() + '_'
mod_name_upper = mod_name_lower.upper()
modules.append((mod_name_lower, mod_name_upper))
print("""
STATIC const mp_rom_map_elem_t stm_%s_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_%s) },
""" % (mod_name_lower, mod_name_upper))
needed_qstrs.add(mod_name_upper)
for r in reg_defs:
print(' { MP_ROM_QSTR(MP_QSTR_%s), MP_ROM_INT(%#x) }, // %s-bits, %s' % (r[0], r[1], r[2], r[3]))
needed_qstrs.add(r[0])
print("""};
STATIC MP_DEFINE_CONST_DICT(stm_%s_globals, stm_%s_globals_table);
const mp_obj_module_t stm_%s_obj = {
.base = { &mp_type_module },
.name = MP_QSTR_%s,
.globals = (mp_obj_dict_t*)&stm_%s_globals,
};
""" % (mod_name_lower, mod_name_lower, mod_name_lower, mod_name_upper, mod_name_lower))
def main():
cmd_parser = argparse.ArgumentParser(description='Extract ST constants from a C header file.')
cmd_parser.add_argument('file', nargs=1, help='input file')
cmd_parser.add_argument('-q', '--qstr', dest='qstr_filename', default='build/stmconst_qstr.h',
help='Specified the name of the generated qstr header file')
cmd_parser.add_argument('--mpz', dest='mpz_filename', default='build/stmconst_mpz.h',
help='the destination file of the generated mpz header')
args = cmd_parser.parse_args()
periphs, reg_defs = parse_file(args.file[0])
# add legacy GPIO constants that were removed when upgrading CMSIS
if 'GPIO' in reg_defs and 'stm32f4' in args.file[0]:
reg_defs['GPIO'].append(['BSRRL', 0x18, 16, 'legacy register'])
reg_defs['GPIO'].append(['BSRRH', 0x1a, 16, 'legacy register'])
modules = []
needed_qstrs = set()
needed_mpzs = set()
print("// Automatically generated from %s by make-stmconst.py" % args.file[0])
print("")
for periph_name, periph_val in periphs:
print_periph(periph_name, periph_val, needed_qstrs, needed_mpzs)
for reg in (
'ADC',
#'ADC_Common',
#'CAN_TxMailBox',
#'CAN_FIFOMailBox',
#'CAN_FilterRegister',
#'CAN',
'CRC',
'DAC',
'DBGMCU',
'DMA_Stream',
'DMA',
'EXTI',
'FLASH',
'GPIO',
'SYSCFG',
'I2C',
'IWDG',
'PWR',
'RCC',
'RTC',
#'SDIO',
'SPI',
'TIM',
'USART',
'WWDG',
'RNG',
):
if reg in reg_defs:
print_regs(reg, reg_defs[reg], needed_qstrs, needed_mpzs)
#print_regs_as_submodules(reg, reg_defs[reg], modules, needed_qstrs)
#print("#define MOD_STM_CONST_MODULES \\")
#for mod_lower, mod_upper in modules:
# print(" { MP_OBJ_NEW_QSTR(MP_QSTR_%s), (mp_obj_t)&stm_%s_obj }, \\" % (mod_upper, mod_lower))
print("")
with open(args.qstr_filename, 'wt') as qstr_file:
for qstr in sorted(needed_qstrs):
print('Q({})'.format(qstr), file=qstr_file)
with open(args.mpz_filename, 'wt') as mpz_file:
for mpz in sorted(needed_mpzs):
assert 0 <= mpz <= 0xffffffff
print('STATIC const mp_obj_int_t mpz_%08x = {{&mp_type_int}, '
'{.neg=0, .fixed_dig=1, .alloc=2, .len=2, ' '.dig=(uint16_t[]){%#x, %#x}}};'
% (mpz, mpz & 0xffff, (mpz >> 16) & 0xffff), file=mpz_file)
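# Worked example (illustrative): for mpz 0x40020000 the two 16-bit digits
# are 0x40020000 & 0xffff == 0x0 and (0x40020000 >> 16) & 0xffff == 0x4002,
# so the emitted initializer is .dig=(uint16_t[]){0x0, 0x4002}.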
if __name__ == "__main__":
main()
| mit |
iiisthu/sparkSdn | python/pyspark/context.py | 3 | 15828 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import sys
from threading import Lock
from tempfile import NamedTemporaryFile
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD
from py4j.java_collections import ListConverter
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD}s and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_writeToFile = None
_next_accum_id = 0
_active_spark_context = None
_lock = Lock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=1024, serializer=PickleSerializer(), conf=None):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
@param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
@param appName: A name for your job, to display on the cluster web UI.
@param sparkHome: Location where Spark is installed on cluster nodes.
@param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
@param environment: A dictionary of environment variables to set on
worker nodes.
@param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching or -1 to use an
unlimited batch size.
@param serializer: The serializer for RDDs.
@param conf: A L{SparkConf} object setting Spark properties.
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
SparkContext._ensure_initialized(self)
self.environment = environment or {}
self._conf = conf or SparkConf(_jvm=self._jvm)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 1:
self.serializer = self._unbatched_serializer
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.iteritems():
self._conf.setExecutorEnv(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
# Create the Java SparkContext through Py4J
self._jsc = self._jvm.JavaSparkContext(self._conf._jconf)
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jsc.accumulator(
self._jvm.java.util.ArrayList(),
self._jvm.PythonAccumulatorParam(host, port))
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = set()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.append(root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir).getAbsolutePath()
@classmethod
def _ensure_initialized(cls, instance=None):
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = launch_gateway()
SparkContext._jvm = SparkContext._gateway.jvm
SparkContext._writeToFile = SparkContext._jvm.PythonRDD.writeToFile
if instance:
if SparkContext._active_spark_context and SparkContext._active_spark_context != instance:
raise ValueError("Cannot run multiple SparkContexts at once")
else:
SparkContext._active_spark_context = instance
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
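# A minimal usage sketch (illustrative), run before creating any context:
#   SparkContext.setSystemProperty('spark.executor.memory', '2g')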
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
def __del__(self):
self.stop()
def stop(self):
"""
Shut down the SparkContext.
"""
if self._jsc:
self._jsc.stop()
self._jsc = None
if self._accumulatorServer:
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD.
>>> sc.parallelize(range(5), 5).glom().collect()
[[0], [1], [2], [3], [4]]
"""
numSlices = numSlices or self.defaultParallelism
# Calling the Java parallelize() method with an ArrayList is too slow,
# because it sends O(n) Py4J commands. As an alternative, serialized
# objects are written to a file and loaded through textFile().
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = min(len(c) // numSlices, self._batchSize)
if batchSize > 1:
serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
else:
serializer = self._unbatched_serializer
serializer.dump_stream(c, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
return RDD(jrdd, self, serializer)
def textFile(self, name, minSplits=None):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minSplits), self,
UTF8Deserializer())
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
rest = ListConverter().convert(rest, self._gateway._gateway_client)
return RDD(self._jsc.union(first, rest), self,
rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will be
sent to each cluster only once.
"""
pickleSer = PickleSerializer()
pickled = pickleSer.dumps(value)
jbroadcast = self._jsc.broadcast(bytearray(pickled))
return Broadcast(jbroadcast.id(), value, jbroadcast,
self._pickled_broadcast_vars)
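# A minimal usage sketch (illustrative; assumes a live SparkContext `sc`):
#   words = sc.broadcast(["a", "b", "c"])
#   sc.parallelize([0, 1, 2]).map(lambda i: words.value[i]).collect()
#   # ['a', 'b', 'c']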
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise Exception("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
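# A minimal usage sketch (illustrative; assumes a live SparkContext `sc`):
#   acc = sc.accumulator(0)
#   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
#   acc.value  # 10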
def addFile(self, path):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(path)<pyspark.files.SparkFiles.get>} to find its
download location.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * 100 for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path)
def clearFiles(self):
"""
Clear the job's list of files added by L{addFile} or L{addPyFile} so
that they do not get downloaded to any new nodes.
"""
# TODO: remove added .py or .zip files from the PYTHONPATH?
self._jsc.sc().clearFiles()
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename.endswith('.zip') or filename.endswith('.ZIP') or filename.endswith('.egg'):
self._python_includes.append(filename)
sys.path.append(os.path.join(SparkFiles.getRootDirectory(), filename)) # for tests in local mode
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk, storageLevel.useMemory,
storageLevel.deserialized, storageLevel.replication)
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
feczo/hellodashboard | oauth2client/locked_file.py | 144 | 11379 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavailable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
print 'Acquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = 'cache@google.com (David T McWherter)'
import errno
import logging
import os
import time
from oauth2client import util
logger = logging.getLogger(__name__)
class CredentialsFileSymbolicLinkError(Exception):
"""Credentials files must not be symbolic links."""
class AlreadyLockedException(Exception):
"""Trying to lock a file that has already been locked by the LockedFile."""
pass
def validate_file(filename):
if os.path.islink(filename):
raise CredentialsFileSymbolicLinkError(
'File: %s is a symbolic link.' % filename)
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
"""Lock files using Posix advisory lock files."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Tries to create a .lock file next to the file we're trying to open.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
self._locked = False
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
lock_filename = self._posix_lockfile(self._filename)
start_time = time.time()
while True:
try:
self._lock_fd = os.open(lock_filename,
os.O_CREAT|os.O_EXCL|os.O_RDWR)
self._locked = True
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= timeout:
logger.warn('Could not acquire lock %s in %s seconds' % (
lock_filename, timeout))
# Close the file and open in fallback_mode.
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Unlock a file by removing the .lock file, and close the handle."""
if self._locked:
lock_filename = self._posix_lockfile(self._filename)
os.close(self._lock_fd)
os.unlink(lock_filename)
self._locked = False
self._lock_fd = None
if self._fh:
self._fh.close()
def _posix_lockfile(self, filename):
"""The name of the lock file to use for posix locking."""
return '%s.lock' % filename
try:
import fcntl
class _FcntlOpener(_Opener):
"""Open, lock, and unlock a file using fcntl.lockf."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError, e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise e
if e.errno != errno.EACCES:
raise e
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the fcntl.lockf primitive."""
if self._locked:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_FcntlOpener = None
try:
import pywintypes
import win32con
import win32file
class _Win32Opener(_Opener):
"""Open, lock, and unlock a file using windows primitives."""
# Error #33:
# 'The process cannot access the file because another process'
FILE_IN_USE_ERROR = 33
# Error #158:
# 'The segment is already unlocked.'
FILE_ALREADY_UNLOCKED_ERROR = 158
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.LockFileEx(
hfile,
(win32con.LOCKFILE_FAIL_IMMEDIATELY|
win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
pywintypes.OVERLAPPED())
self._locked = True
return
except pywintypes.error, e:
if timeout == 0:
raise e
# If the error is not that the file is already in use, raise.
if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the win32 primitive."""
if self._locked:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
except pywintypes.error, e:
if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
raise
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_Win32Opener = None
class LockedFile(object):
"""Represent a file that has exclusive access."""
@util.positional(4)
def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
"""Construct a LockedFile.
Args:
filename: string, The path of the file to open.
mode: string, The mode to try to open the file with.
fallback_mode: string, The mode to use if locking fails.
use_native_locking: bool, Whether or not fcntl/win32 locking is used.
"""
opener = None
if not opener and use_native_locking:
if _Win32Opener:
opener = _Win32Opener(filename, mode, fallback_mode)
if _FcntlOpener:
opener = _FcntlOpener(filename, mode, fallback_mode)
if not opener:
opener = _PosixOpener(filename, mode, fallback_mode)
self._opener = opener
def filename(self):
"""Return the filename we were constructed with."""
return self._opener._filename
def file_handle(self):
"""Return the file_handle to the opened file."""
return self._opener.file_handle()
def is_locked(self):
"""Return whether we successfully locked the file."""
return self._opener.is_locked()
def open_and_lock(self, timeout=0, delay=0.05):
"""Open the file, trying to lock it.
Args:
timeout: float, The number of seconds to try to acquire the lock.
delay: float, The number of seconds to wait between retry attempts.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
self._opener.open_and_lock(timeout, delay)
def unlock_and_close(self):
"""Unlock and close a file."""
self._opener.unlock_and_close()
| apache-2.0 |
goan15910/ConvDet | src/deploy.py | 1 | 6278 |
"""Deploy"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from config import *
from dataset import pascal_voc, kitti, vid
from utils.util import bbox_transform, Timer
from nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'PASCAL_VOC',
"""PASCAL_VOC / KITTI""")
tf.app.flags.DEFINE_string('data_path', '/tmp3/jeff/VOCdevkit2007', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'test',
"""Only used for VOC data."""
"""Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
"""VOC challenge year. 2007 or 2012"""
"""Only used for VOC data""")
tf.app.flags.DEFINE_string('eval_dir', '/tmp3/jeff/ConvDet/experiments/yolo_v2/deploy',
"""Directory where to write event logs """)
tf.app.flags.DEFINE_string('pretrained_model_path', '/tmp3/jeff/ConvDet/data/yolo/yolo_weights.pkl',
"""Path to pretrained weight path.""")
tf.app.flags.DEFINE_string('net', 'yolo_v2',
"""Neural net architecture.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
def eval_once(saver, summary_writer, imdb, model, mc):
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# Initialize
init = tf.global_variables_initializer()
sess.run(init)
#global_step = '0'
global_step = None
n_imgs = len(imdb.image_idx)
n_iters = int(n_imgs / mc.BATCH_SIZE) + 1
all_boxes = [[[] for _ in xrange(n_imgs)]
for _ in xrange(imdb.num_classes)]
_t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}
num_detection = 0.0
for i in xrange(n_iters):
_t['im_read'].tic()
images, scales = imdb.read_image_batch(shuffle=False)
_t['im_read'].toc()
_t['im_detect'].tic()
# TODO(jeff): remove output other than det_boxes, det_probs, det_class
det_boxes, det_probs, det_class, probs, confs, \
conv13, reorg20, concat20 = sess.run(
[
model.det_boxes, model.det_probs, model.det_class,
model.probs, model.pred_conf,
model.conv13, model.reorg20, model.concat20
],
feed_dict={model.image_input:images, \
model.is_training: False, model.keep_prob: 1.0}
)
_t['im_detect'].toc()
_t['misc'].tic()
for j in range(len(det_boxes)): # batch
# rescale
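# (an assumption from the slicing below: the even-indexed box columns hold
#  x values and the odd-indexed ones y values, so each axis is divided by
#  its own resize factor to map boxes back to original image coordinates)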
det_boxes[j, :, 0::2] /= scales[j][0]
det_boxes[j, :, 1::2] /= scales[j][1]
det_bbox, score, det_class = model.filter_yolo_predict(
det_boxes[j], det_probs[j], det_class[j])
num_detection += len(det_bbox)
for c, b, s in zip(det_class, det_bbox, score):
all_boxes[c][i].append(bbox_transform(b) + [s])
_t['misc'].toc()
print ('im_detect: {:d}/{:d} im_read: {:.3f}s '
'detect: {:.3f}s misc: {:.3f}s'.format(
i+1, n_imgs, _t['im_read'].average_time,
_t['im_detect'].average_time, _t['misc'].average_time))
print ('Evaluating detections...')
aps, ap_names = imdb.evaluate_detections(
FLAGS.eval_dir, global_step, all_boxes)
print ('Evaluation summary:')
print (' Average number of detections per image: {}'.format(
num_detection/n_imgs))
print (' Timing:')
print (' im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
_t['im_read'].average_time, _t['im_detect'].average_time,
_t['misc'].average_time))
print (' Average precisions:')
eval_summary_ops = []
for cls, ap in zip(ap_names, aps):
eval_summary_ops.append(
tf.summary.scalar('APs/'+cls, ap)
)
print (' {}: {:.3f}'.format(cls, ap))
print (' Mean average precision: {:.3f}'.format(np.mean(aps)))
eval_summary_ops.append(
tf.summary.scalar('APs/mAP', np.mean(aps))
)
eval_summary_ops.append(
tf.summary.scalar('timing/image_detect', _t['im_detect'].average_time)
)
eval_summary_ops.append(
tf.summary.scalar('timing/image_read', _t['im_read'].average_time)
)
eval_summary_ops.append(
tf.summary.scalar('timing/post_process', _t['misc'].average_time)
)
eval_summary_ops.append(
tf.summary.scalar('num_detections_per_image', num_detection/n_imgs)
)
print ('Analyzing detections...')
stats, ims = imdb.do_detection_analysis_in_eval(
FLAGS.eval_dir, global_step)
for k, v in stats.iteritems():
eval_summary_ops.append(
tf.summary.scalar(
'Detection Analysis/'+k, v)
)
eval_summary_str = sess.run(eval_summary_ops)
for sum_str in eval_summary_str:
summary_writer.add_summary(sum_str, global_step)
def evaluate():
"""Evaluate."""
assert FLAGS.dataset in ['PASCAL_VOC', 'VID'], \
'Either PASCAL_VOC / VID'
if FLAGS.dataset == 'PASCAL_VOC':
mc = pascal_voc_yolo_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
mc.LOAD_PRETRAINED_MODEL = True
imdb = pascal_voc(FLAGS.image_set, FLAGS.year, FLAGS.data_path, mc)
elif FLAGS.dataset == 'VID':
mc = vid_yolo_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
mc.LOAD_PRETRAINED_MODEL = True
imdb = vid(FLAGS.image_set, FLAGS.data_path, mc)
with tf.Graph().as_default() as g:
assert FLAGS.net == 'yolo_v2', \
'Support only yolo_v2'
model = YOLO_V2(mc, FLAGS.gpu)
saver = tf.train.Saver(model.model_params)
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
# Evaluate only once for deployment
eval_once(saver, summary_writer, imdb, model, mc)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
evaluate()
if __name__ == '__main__':
tf.app.run()
| bsd-2-clause |
ugoertz/django-userena | demo/createdb.py | 19 | 2258 | import MySQLdb
import psycopg2
import os
from wsgi import *
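# An illustrative shape of the DATABASES settings walked below (names are
# assumptions; the keys match what create_dbs reads):
#   DATABASES = {'default': {
#       'ENGINE': 'django.db.backends.postgresql_psycopg2',
#       'NAME': 'demo', 'USER': 'demo', 'PASSWORD': 'secret',
#       'HOST': 'localhost', 'PORT': 5432}}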
def create_dbs():
print("create_dbs: let's go.")
django_settings = __import__(os.environ['DJANGO_SETTINGS_MODULE'], fromlist='DATABASES')
print("create_dbs: got settings.")
databases = django_settings.DATABASES
for name, db in databases.iteritems():
host = db['HOST']
user = db['USER']
password = db['PASSWORD']
port = db['PORT']
db_name = db['NAME']
db_type = db['ENGINE']
# see if it is mysql
if db_type.endswith('mysql'):
print 'creating database %s on %s' % (db_name, host)
db = MySQLdb.connect(user=user,
passwd=password,
host=host,
port=port)
cur = db.cursor()
print("Check if database is already there.")
cur.execute("""SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA
WHERE SCHEMA_NAME = %s""", (db_name,))
results = cur.fetchone()
if not results:
print("Database %s doesn't exist, lets create it." % db_name)
sql = """CREATE DATABASE IF NOT EXISTS %s """ % (db_name,)
print("> %s" % sql)
cur.execute(sql)
print(".....")
else:
print("database already exists, moving on to next step.")
# see if it is postgresql
elif db_type.endswith('postgresql_psycopg2'):
print 'creating database %s on %s' % (db_name, host)
con = psycopg2.connect(host=host, user=user, password=password, port=port, database='postgres')
con.set_isolation_level(0)
cur = con.cursor()
try:
cur.execute('CREATE DATABASE %s' % db_name)
except psycopg2.ProgrammingError as detail:
print detail
print 'moving right along...'
else:
print("ERROR: {0} is not supported by this script, you will need to create your database by hand.".format(db_type))
if __name__ == '__main__':
import sys
print("create_dbs start")
create_dbs()
print("create_dbs all done")
| bsd-3-clause |
dstenb/pylaunchr-twitch | handler.py | 1 | 1166 | from pygametk.logger import Logger
from pylaunchr.service.handler import ServiceCommandHandler
from plugin.twitch.data import TwitchGame
class TwitchHandler(ServiceCommandHandler):
# TODO: Use inherited variant
def handle_event(self, event):
Logger("twitch").debug("Received event")
callback = event.args["callback"]
callback_args = event.args["callback_args"]
result = event.args["result"]
if "exception" in result:
Logger("twitch").warning("Failed with {}", result["exception"])
callback(result, *callback_args)
def get_game(self, name, callback, callback_args=[]):
Logger("twitch").debug("Get game {}", name)
self._put("get_game", {"name": name}, callback, callback_args)
def get_game_streams(self, game, limit, offset, callback, callback_args=[]):
Logger("twitch").debug("Get game streams for {}", game)
name = game.name if isinstance(game, TwitchGame) else game
args = {"name": name, "limit": limit, "offset": offset}
self._put("get_game_streams", args, callback, callback_args)
def tick(self, tick):
return False
| mit |
felixfontein/ansible | lib/ansible/modules/tempfile.py | 11 | 3337 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: tempfile
version_added: "2.3"
short_description: Creates temporary files and directories
description:
  - The C(tempfile) module creates temporary files and directories. The C(mktemp) command takes different parameters on various systems; this module
    helps avoid the troubles related to that. Files and directories created by the module are accessible only to their creator. If you need to make them
    world-accessible, use the M(ansible.builtin.file) module.
- For Windows targets, use the M(ansible.windows.win_tempfile) module instead.
options:
state:
description:
- Whether to create file or directory.
type: str
choices: [ directory, file ]
default: file
path:
description:
- Location where temporary file or directory should be created.
- If path is not specified, the default system temporary directory will be used.
type: path
prefix:
description:
- Prefix of file/directory name created by module.
type: str
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
type: str
default: ""
seealso:
- module: ansible.builtin.file
- module: ansible.windows.win_tempfile
author:
- Krzysztof Magosa (@krzysztof-magosa)
'''
EXAMPLES = """
- name: Create temporary build directory
ansible.builtin.tempfile:
state: directory
suffix: build
- name: Create temporary file
ansible.builtin.tempfile:
state: file
suffix: temp
register: tempfile_1
- name: Use the registered var and the file module to remove the temporary file
ansible.builtin.file:
path: "{{ tempfile_1.path }}"
state: absent
when: tempfile_1.path is defined
"""
RETURN = '''
path:
description: Path to created file or directory.
returned: success
type: str
sample: "/tmp/ansible.bMlvdk"
'''
from os import close
from tempfile import mkstemp, mkdtemp
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='file', choices=['file', 'directory']),
path=dict(type='path'),
prefix=dict(type='str', default='ansible.'),
suffix=dict(type='str', default=''),
),
)
try:
if module.params['state'] == 'file':
handle, path = mkstemp(
prefix=module.params['prefix'],
suffix=module.params['suffix'],
dir=module.params['path'],
)
close(handle)
else:
path = mkdtemp(
prefix=module.params['prefix'],
suffix=module.params['suffix'],
dir=module.params['path'],
)
module.exit_json(changed=True, path=path)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
rajatsingla28/electron | script/merge-electron-checksums.py | 32 | 1189 | #!/usr/bin/env python
# Download individual checksum files for Electron zip files from S3,
# concatenate them, and upload to GitHub.
from __future__ import print_function
import argparse
import sys
from lib.config import s3_config
from lib.util import boto_path_dirs
sys.path.extend(boto_path_dirs())
from boto.s3.connection import S3Connection
def main():
args = parse_args()
bucket_name, access_key, secret_key = s3_config()
s3 = S3Connection(access_key, secret_key)
bucket = s3.get_bucket(bucket_name)
if bucket is None:
print('S3 bucket "{}" does not exist!'.format(bucket_name), file=sys.stderr)
return 1
prefix = 'atom-shell/tmp/{0}/'.format(args.version)
shasums = [s3_object.get_contents_as_string().strip()
for s3_object in bucket.list(prefix, delimiter='/')
if s3_object.key.endswith('.sha256sum')]
print('\n'.join(shasums))
return 0
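# e.g. (illustrative): for --version v1.8.0 this lists keys under
# 'atom-shell/tmp/v1.8.0/' ending in '.sha256sum' and prints their
# concatenated contents as the combined SHASUMS listing.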
def parse_args():
parser = argparse.ArgumentParser(description='Upload SHASUMS files to GitHub')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
if __name__ == '__main__':
sys.exit(main())
| mit |
nmaswood/tv_scraping | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampled from about 20M of text material, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| apache-2.0 |
cucumber/gherkin-python | gherkin/__main__.py | 2 | 1125 | import os
from optparse import OptionParser
import sys
if sys.version_info < (3, 0):
string_type = basestring
if os.name != 'nt':
import codecs
UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)
else:
string_type = str
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import json
from gherkin.stream.gherkin_events import GherkinEvents
from gherkin.stream.source_events import SourceEvents
parser = OptionParser()
parser.add_option("--no-source", action="store_false", dest="print_source", default=True, help="don't print source events")
parser.add_option("--no-ast", action="store_false", dest="print_ast", default=True, help="don't print ast events")
parser.add_option("--no-pickles", action="store_false", dest="print_pickles", default=True, help="don't print pickle events")
(options, args) = parser.parse_args()
source_events = SourceEvents(args)
gherkin_events = GherkinEvents(options)
for source_event in source_events.enum():
for event in gherkin_events.enum(source_event):
print(json.dumps(event))
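# Usage sketch (the feature-file path is illustrative):
#   python -m gherkin --no-pickles features/minimal.feature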
| mit |
puttarajubr/commcare-hq | custom/ilsgateway/management/commands/test_report_data_generation.py | 1 | 1783 | from datetime import datetime
from django.core.management import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.ilsgateway.models import ILSGatewayConfig, ReportRun, SupplyPointStatus, DeliveryGroupReport, \
SupplyPointWarehouseRecord, OrganizationSummary, ProductAvailabilityData, Alert
from custom.ilsgateway.tasks import report_run
from custom.ilsgateway.tanzania.warehouse import updater
class Command(BaseCommand):
"""
Manually test the warehouse report data generation.
"""
def handle(self, domain, *args, **options):
if len(args) == 1:
ilsgateway_id = args[0]
else:
ilsgateway_id = 1166 # defaults to bondenzi: http://ilsgateway.com/tz/facility/1166/
# monkey patch the default start date to cover less data
updater.default_start_date = lambda: datetime(2015, 1, 1)
config = ILSGatewayConfig.for_domain(domain)
assert config.enabled, 'ilsgateway sync must be configured for this domain'
locations = _get_locations_from_ilsgateway_id(domain, ilsgateway_id)
_clear_data(domain)
report_run(domain, locations, strict=False)
def _clear_data(domain):
ReportRun.objects.filter(domain=domain).delete()
SupplyPointStatus.objects.all().delete()
DeliveryGroupReport.objects.all().delete()
SupplyPointWarehouseRecord.objects.all().delete()
OrganizationSummary.objects.all().delete()
ProductAvailabilityData.objects.all().delete()
Alert.objects.all().delete()
def _get_locations_from_ilsgateway_id(domain, ilsgateway_id):
facility = SQLLocation.objects.get(domain=domain, external_id=ilsgateway_id)
return [facility.couch_location] + [facility.parent.couch_location] + [facility.parent.parent.couch_location]
| bsd-3-clause |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/SQLAlchemy-0.7.0-py2.7-linux-x86_64.egg/sqlalchemy/types.py | 8 | 75051 | # sqlalchemy/types.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines genericized SQL types, each represented by a subclass of
:class:`~sqlalchemy.types.AbstractType`. Dialects define further subclasses of these
types.
For more information see the SQLAlchemy documentation on types.
"""
__all__ = [ 'TypeEngine', 'TypeDecorator', 'AbstractType', 'UserDefinedType',
'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR','TEXT', 'Text',
'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME',
'CLOB', 'BLOB', 'BOOLEAN', 'SMALLINT', 'INTEGER', 'DATE', 'TIME',
'String', 'Integer', 'SmallInteger', 'BigInteger', 'Numeric',
'Float', 'DateTime', 'Date', 'Time', 'LargeBinary', 'Binary',
'Boolean', 'Unicode', 'MutableType', 'Concatenable',
'UnicodeText','PickleType', 'Interval', 'Enum' ]
import inspect
import datetime as dt
import codecs
from sqlalchemy import exc, schema
from sqlalchemy.sql import expression, operators
from sqlalchemy.util import pickle
from sqlalchemy.util.compat import decimal
from sqlalchemy.sql.visitors import Visitable
from sqlalchemy import util
from sqlalchemy import processors, events
import collections
default = util.importlater("sqlalchemy.engine", "default")
NoneType = type(None)
if util.jython:
import array
class AbstractType(Visitable):
"""Base for all types - not needed except for backwards
compatibility."""
class TypeEngine(AbstractType):
"""Base for built-in types."""
def copy_value(self, value):
return value
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
"""
return None
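# Illustrative override (a sketch, not part of this base class): a type whose
# DB-API expects ISO-formatted date strings might supply
#
#     def bind_processor(self, dialect):
#         def process(value):
#             return value.isoformat() if value is not None else None
#         return process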
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def is_mutable(self):
"""Return True if the target Python type is 'mutable'.
This allows systems like the ORM to know if a column value can
be considered 'not changed' by comparing the identity of
objects alone. Values such as dicts, lists which
are serialized into strings are examples of "mutable"
column structures.
.. note:: This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
When this method is overridden, :meth:`copy_value` should
also be supplied. The :class:`.MutableType` mixin
is recommended as a helper.
"""
return False
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
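# Illustrative call site (an assumption, not code from this module): a dialect
# could use the returned DB-API type when preparing a cursor, e.g.
#
#     dbapi_type = some_type.get_dbapi_type(dialect.dbapi)
#     if dbapi_type is not None:
#         cursor.setinputsizes(dbapi_type)  # one entry per bind parameter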
def _adapt_expression(self, op, othertype):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
"""
return op, self
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t is TypeEngine or t is UserDefinedType:
return typ
elif issubclass(t, TypeEngine):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this :class:`.TypeEngine`."""
try:
return dialect._type_memos[self]['impl']
except KeyError:
return self._dialect_info(dialect)['impl']
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]['bind']
except KeyError:
d = self._dialect_info(dialect)
d['bind'] = bp = d['impl'].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d['impl'].result_processor(dialect, coltype)
return rp
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {'impl':impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def _coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _type_map.get(type(value), NULLTYPE)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
else:
return _coerced_type
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect
return dialect.type_compiler.process(self)
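# Illustrative usage (names assumed, not defined in this module):
#
#     from sqlalchemy import String
#     from sqlalchemy.dialects import mysql
#     String(50).compile(dialect=mysql.dialect())  # -> "VARCHAR(50)"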
@property
def _default_dialect(self):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
# Py3K
#return unicode(self.compile())
# Py2K
return unicode(self.compile()).\
encode('ascii', 'backslashreplace')
# end Py2K
def __init__(self, *args, **kwargs):
"""Support implementations that were passing arguments"""
if args or kwargs:
util.warn_deprecated("Passing arguments to type object "
"constructor %s is deprecated" % self.__class__)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
", ".join("%s=%r" % (k, getattr(self, k, None))
for k in inspect.getargspec(self.__init__)[0][1:]))
class UserDefinedType(TypeEngine):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
"""
__visit_name__ = "user_defined"
def _adapt_expression(self, op, othertype):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
"""
return self.adapt_operator(op), self
def adapt_operator(self, op):
"""A hook which allows the given operator to be adapted
to something new.
See also UserDefinedType._adapt_expression(), an as-yet-
semi-public method with greater capability in this regard.
"""
return op
class TypeDecorator(TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self):
return MyType(self.impl.length)
The class-level "impl" variable is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
where the ``self.impl`` attribute is assigned an instance
of the implementation type. If ``impl`` at the class level
is already an instance, then it's assigned to ``self.impl``
as is.
Subclasses can override this to customize the generation
of ``self.impl``.
"""
if not hasattr(self.__class__, 'impl'):
raise AssertionError("TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated")
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
def _gen_dialect_impl(self, dialect):
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError('Type object %s does not properly '
'implement the copy() method, it must '
'return an object of type %s' % (self,
self.__class__))
tt.impl = typedesc
return tt
@util.memoized_property
def _type_affinity(self):
return self.impl._type_affinity
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
Makes usage of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding :meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if type(adapted) is not type(self):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
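For example, a hypothetical decorator that uses a larger string
type only on MySQL might look like (a sketch; names are
illustrative)::
    def load_dialect_impl(self, dialect):
        if dialect.name == 'mysql':
            return dialect.type_descriptor(Text())
        return self.impl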
"""
return self.impl
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
:param value: the value. Can be None.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
:param value: the value. Can be None.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def bind_processor(self, dialect):
"""Provide a bound value processing function for the given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
"""
if self.__class__.process_bind_param.func_code \
is not TypeDecorator.process_bind_param.func_code:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
"""
if self.__class__.process_result_value.func_code \
is not TypeDecorator.process_result_value.func_code:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect,
coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
def _coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine._coerce_compared_value` for a description."""
return self.coerce_compared_value(op, value)
def copy(self):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this :class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def copy_value(self, value):
"""Given a value, produce a copy of it.
By default this calls upon :meth:`.TypeEngine.copy_value`
of the underlying "impl".
:meth:`.copy_value` will return the object
itself, assuming "mutability" is not enabled.
Only the :class:`.MutableType` mixin provides a copy
function that actually produces a new object.
The copying function is used by the ORM when
"mutable" types are used, to memoize the original
version of an object as loaded from the database,
which is then compared to the possibly mutated
version to check for changes.
Modern implementations should use the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel` for intercepting in-place
changes to values.
"""
return self.impl.copy_value(value)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
def is_mutable(self):
"""Return True if the target Python type is 'mutable'.
This allows systems like the ORM to know if a column value can
be considered 'not changed' by comparing the identity of
objects alone. Values such as dicts, lists which
are serialized into strings are examples of "mutable"
column structures.
.. note:: This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
"""
return self.impl.is_mutable()
def _adapt_expression(self, op, othertype):
op, typ = self.impl._adapt_expression(op, othertype)
if typ is self.impl:
return op, self
else:
return op, typ
class MutableType(object):
"""A mixin that marks a :class:`.TypeEngine` as representing
a mutable Python object type. This functionality is used
only by the ORM.
.. note:: :class:`.MutableType` is superseded as of SQLAlchemy 0.7
by the ``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`. This extension provides an event
driven approach to in-place mutation detection that does not
incur the severe performance penalty of the :class:`.MutableType`
approach.
"mutable" means that changes can occur in place to a value
of this type. Examples includes Python lists, dictionaries,
and sets, as well as user-defined objects. The primary
need for identification of "mutable" types is by the ORM,
which applies special rules to such values in order to guarantee
that changes are detected. These rules may have a significant
performance impact, described below.
A :class:`.MutableType` usually accepts a flag named
``mutable`` to enable/disable "mutability",
represented on this class by :meth:`is_mutable`. Examples
include :class:`.PickleType` and
:class:`~sqlalchemy.dialects.postgresql.base.ARRAY`. Setting
this flag to ``True`` enables mutability-specific behavior
by the ORM.
The :meth:`copy_value` and :meth:`compare_values` functions
represent a copy and compare function for values of this
type - implementing subclasses should override these
appropriately.
.. warning:: The usage of mutable types has significant performance
implications when using the ORM. In order to detect changes, the
ORM must create a copy of the value when it is first
accessed, so that changes to the current value can be compared
against the "clean" database-loaded value. Additionally, when the
ORM checks to see if any data requires flushing, it must scan
through all instances in the session which are known to have
"mutable" attributes and compare the current value of each
one to its "clean"
value. So for example, if the Session contains 6000 objects (a
fairly large amount) and autoflush is enabled, every individual
execution of :class:`.Query` will require a full scan of that subset of
the 6000 objects that have mutable attributes, possibly resulting
in tens of thousands of additional method calls for every query.
As of SQLAlchemy 0.7, the ``sqlalchemy.ext.mutable`` extension is provided, which
allows an event driven approach to in-place mutation detection. This
approach should now be favored over the usage of :class:`.MutableType`
with ``mutable=True``. ``sqlalchemy.ext.mutable`` is described in
:ref:`mutable_toplevel`.
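For example, :class:`.PickleType` participates in this protocol
when constructed with ``mutable=True`` (a minimal sketch)::
    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.types import PickleType
    t = Table('t', MetaData(),
        Column('id', Integer, primary_key=True),
        Column('data', PickleType(mutable=True)))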
"""
def is_mutable(self):
"""Return True if the target Python type is 'mutable'.
For :class:`.MutableType`, this method is set to
return ``True``.
"""
return True
def copy_value(self, value):
"""Unimplemented."""
raise NotImplementedError()
def compare_values(self, x, y):
"""Compare *x* == *y*."""
return x == y
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if (issubclass(typeobj.__class__, impltype)):
return typeobj
return typeobj.adapt(impltype)
class NullType(TypeEngine):
"""An unknown type.
NullTypes will stand in if :class:`~sqlalchemy.Table` reflection
encounters a column data type unknown to SQLAlchemy. The
resulting columns are nearly fully usable: the DB-API adapter will
handle all translation to and from the database data type.
NullType does not have sufficient information to participate in a
``CREATE TABLE`` statement and will raise an exception if
encountered during a :meth:`~sqlalchemy.Table.create` operation.
"""
__visit_name__ = 'null'
def _adapt_expression(self, op, othertype):
if isinstance(othertype, NullType) or not operators.is_commutative(op):
return op, self
else:
return othertype._adapt_expression(op, self)
NullTypeEngine = NullType
class Concatenable(object):
"""A mixin that marks a type as supporting 'concatenation',
typically strings."""
def _adapt_expression(self, op, othertype):
if op is operators.add and issubclass(othertype._type_affinity,
(Concatenable, NullType)):
return operators.concat_op, self
else:
return op, self
class _DateAffinity(object):
"""Mixin date/time specific expression adaptations.
Rules are implemented within Date,Time,Interval,DateTime, Numeric,
Integer. Based on http://www.postgresql.org/docs/current/static
/functions-datetime.html.
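For example, per the adaptations defined on :class:`.Date`, an
expression such as ``mytable.c.created_date + 5`` resolves its
result type to ``Date`` (``operators.add`` maps ``Integer`` to
``Date`` there), while unmapped combinations fall back to
``NULLTYPE``.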
"""
@property
def _expression_adaptations(self):
raise NotImplementedError()
_blank_dict = util.immutabledict()
def _adapt_expression(self, op, othertype):
othertype = othertype._type_affinity
return op, \
self._expression_adaptations.get(op, self._blank_dict).\
get(othertype, NULLTYPE)
class String(Concatenable, TypeEngine):
"""The base for all string and character types.
In SQL, corresponds to VARCHAR. Can also take Python unicode objects
and encode to the database's encoding in bind params (and the reverse for
result sets.)
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases.
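A typical usage sketch::
    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.types import String
    users = Table('users', MetaData(),
        Column('id', Integer, primary_key=True),
        Column('name', String(50)))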
"""
__visit_name__ = 'string'
def __init__(self, length=None, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False
):
"""
Create a string-holding type.
:param length: optional, a length for the column for use in
DDL statements. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
*length* for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued. Whether the value is
interpreted as bytes or characters is database specific.
:param convert_unicode: defaults to False. If True, the
type will do what is necessary in order to accept
Python Unicode objects as bind parameters, and to return
Python Unicode objects in result rows. This may
require SQLAlchemy to explicitly coerce incoming Python
unicodes into an encoding, and from an encoding
back to Unicode, or it may not require any interaction
from SQLAlchemy at all, depending on the DBAPI in use.
When SQLAlchemy performs the encoding/decoding,
the encoding used is configured via
:attr:`~sqlalchemy.engine.base.Dialect.encoding`, which
defaults to `utf-8`.
The "convert_unicode" behavior can also be turned on
for all String types by setting
:attr:`sqlalchemy.engine.base.Dialect.convert_unicode`
on create_engine().
To instruct SQLAlchemy to perform Unicode encoding/decoding
even on a platform that already handles Unicode natively,
set convert_unicode='force'. This will incur significant
performance overhead when fetching unicode result columns.
:param assert_unicode: Deprecated. A warning is raised in all cases
when a non-Unicode object is passed when SQLAlchemy would coerce
into an encoding (note: but **not** when the DBAPI handles unicode
objects natively). To suppress or raise this warning to an error,
use the Python warnings filter documented at:
http://docs.python.org/library/warnings.html
:param unicode_error: Optional, a method to use to handle Unicode
conversion errors. Behaves like the 'errors' keyword argument to
the standard library's string.decode() functions. This flag
requires that `convert_unicode` is set to `"force"` - otherwise,
SQLAlchemy is not guaranteed to handle the task of unicode
conversion. Note that this flag adds significant performance
overhead to row-fetching operations for backends that already
return unicode objects natively (which most DBAPIs do). This
flag should only be used as an absolute last resort for reading
strings from a column with varied or corrupted encodings,
which only applies to databases that accept invalid encodings
in the first place (i.e. MySQL, *not* PG, SQLite, etc.)
"""
if unicode_error is not None and convert_unicode != 'force':
raise exc.ArgumentError("convert_unicode must be 'force' "
"when unicode_error is set.")
if assert_unicode:
util.warn_deprecated('assert_unicode is deprecated. '
'SQLAlchemy emits a warning in all '
'cases where it would otherwise like '
'to encode a Python unicode object '
'into a specific encoding but a plain '
'bytestring is received. This does '
'*not* apply to DBAPIs that coerce '
'Unicode natively.')
self.length = length
self.convert_unicode = convert_unicode
self.unicode_error = unicode_error
self._warn_on_bytestring = _warn_on_bytestring
def bind_processor(self, dialect):
if self.convert_unicode or dialect.convert_unicode:
if dialect.supports_unicode_binds and \
self.convert_unicode != 'force':
if self._warn_on_bytestring:
def process(value):
# Py3K
#if isinstance(value, bytes):
# Py2K
if isinstance(value, str):
# end Py2K
util.warn("Unicode type received non-unicode bind "
"param value.")
return value
return process
else:
return None
else:
encoder = codecs.getencoder(dialect.encoding)
warn_on_bytestring = self._warn_on_bytestring
def process(value):
if isinstance(value, unicode):
return encoder(value, self.unicode_error)[0]
elif warn_on_bytestring and value is not None:
util.warn("Unicode type received non-unicode bind "
"param value")
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
wants_unicode = self.convert_unicode or dialect.convert_unicode
needs_convert = wants_unicode and \
(dialect.returns_unicode_strings is not True or
self.convert_unicode == 'force')
if needs_convert:
to_unicode = processors.to_unicode_processor_factory(
dialect.encoding, self.unicode_error)
if dialect.returns_unicode_strings:
# we wouldn't be here unless convert_unicode='force'
# was specified, or the driver has erratic unicode-returning
# habits. since we will be getting back unicode
# in most cases, we check for it (decode will fail).
def process(value):
if isinstance(value, unicode):
return value
else:
return to_unicode(value)
return process
else:
# here, we assume that the object is not unicode,
# avoiding expensive isinstance() check.
return to_unicode
else:
return None
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class Text(String):
"""A variably sized string type.
In SQL, usually corresponds to CLOB or TEXT. Can also take Python
unicode objects and encode to the database's encoding in bind
params (and the reverse for result sets.)
"""
__visit_name__ = 'text'
class Unicode(String):
"""A variable length Unicode string.
The ``Unicode`` type is a :class:`.String` which converts Python
``unicode`` objects (i.e., strings that are defined as
``u'somevalue'``) into encoded bytestrings when passing the value
to the database driver, and similarly decodes values from the
database back into Python ``unicode`` objects.
It's roughly equivalent to using a ``String`` object with
``convert_unicode=True``, however
the type has other significance in that it implies the use
of a unicode-capable type on the backend, such as NVARCHAR.
This may affect what type is emitted when issuing CREATE TABLE
and also may affect some DBAPI-specific details, such as type
information passed along to ``setinputsizes()``.
When using the ``Unicode`` type, it is only appropriate to pass
Python ``unicode`` objects, and not plain ``str``. If a
bytestring (``str``) is passed, a runtime warning is issued. If
you notice your application raising these warnings but you're not
sure where, the Python ``warnings`` filter can be used to turn
these warnings into exceptions which will illustrate a stack
trace::
import warnings
warnings.simplefilter('error')
Bytestrings sent to and received from the database are encoded
using the dialect's
:attr:`~sqlalchemy.engine.base.Dialect.encoding`, which defaults
to `utf-8`.
"""
__visit_name__ = 'unicode'
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting String type.
:param length: optional, a length for the column for use in
DDL statements. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
*length* for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued. Whether the value is
interpreted as bytes or characters is database specific.
:param \**kwargs: passed through to the underlying ``String``
type.
"""
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
"""An unbounded-length Unicode string.
See :class:`.Unicode` for details on the unicode
behavior of this object.
Like ``Unicode``, usage of the ``UnicodeText`` type implies a
unicode-capable type being used on the backend, such as NCLOB.
"""
__visit_name__ = 'unicode_text'
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting Text type.
:param length: optional, a length for the column for use in
DDL statements. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
*length* for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued. Whether the value is
interpreted as bytes or characters is database specific.
"""
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
"""A type for ``int`` integers."""
__visit_name__ = 'integer'
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@util.memoized_property
def _expression_adaptations(self):
# TODO: need a dictionary object that will
# handle operators generically here, this is incomplete
return {
operators.add:{
Date:Date,
Integer:Integer,
Numeric:Numeric,
},
operators.mul:{
Interval:Interval,
Integer:Integer,
Numeric:Numeric,
},
# Py2K
operators.div:{
Integer:Integer,
Numeric:Numeric,
},
# end Py2K
operators.truediv:{
Integer:Integer,
Numeric:Numeric,
},
operators.sub:{
Integer:Integer,
Numeric:Numeric,
},
}
class SmallInteger(Integer):
"""A type for smaller ``int`` integers.
Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = 'small_integer'
class BigInteger(Integer):
"""A type for bigger ``int`` integers.
Typically generates a ``BIGINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
"""A type for fixed precision numbers.
Typically generates DECIMAL or NUMERIC. Returns
``decimal.Decimal`` objects by default, applying
conversion as needed.
.. note:: The `cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library
is a high performing alternative to Python's built-in
``decimal.Decimal`` type, which performs very poorly in high volume
situations. SQLAlchemy 0.7 is tested against ``cdecimal`` and supports
it fully. The type is not necessarily supported by DBAPI
implementations however, most of which contain an import for plain
``decimal`` in their source code, even though some such as psycopg2
provide hooks for alternate adapters. SQLAlchemy imports ``decimal``
globally as well. While the alternate ``Decimal`` class can be patched
into SQLA's ``decimal`` module, overall the most straightforward and
foolproof way to use "cdecimal" given current DBAPI and Python support
is to patch it directly into sys.modules before anything else is
imported::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
While the global patch is a little ugly, it's particularly
important to use just one decimal library at a time since
Python Decimal and cdecimal Decimal objects
are not currently compatible *with each other*::
>>> import cdecimal
>>> import decimal
>>> decimal.Decimal("10") == cdecimal.Decimal("10")
False
SQLAlchemy will provide more natural support of
cdecimal if and when it becomes a standard part of Python
installations and is supported by all DBAPIs.
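Typical usage (a brief sketch)::
    from sqlalchemy import Column
    from sqlalchemy.types import Numeric
    # DECIMAL(10, 2) in DDL; results are decimal.Decimal by default
    price = Column('price', Numeric(precision=10, scale=2))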
"""
__visit_name__ = 'numeric'
def __init__(self, precision=None, scale=None, asdecimal=True):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
When using the ``Numeric`` type, care should be taken to ensure
that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
"""
self.precision = precision
self.scale = scale
self.asdecimal = asdecimal
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
util.warn('Dialect %s+%s does *not* support Decimal '
'objects natively, and SQLAlchemy must '
'convert from floating point - rounding '
'errors and other issues may occur. Please '
'consider storing Decimal numbers as strings '
'or integers on this platform for lossless '
'storage.' % (dialect.name, dialect.driver))
# we're a "numeric", DBAPI returns floats, convert.
if self.scale is not None:
return processors.to_decimal_processor_factory(
decimal.Decimal, self.scale)
else:
return processors.to_decimal_processor_factory(
decimal.Decimal)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul:{
Interval:Interval,
Numeric:Numeric,
Integer:Numeric,
},
# Py2K
operators.div:{
Numeric:Numeric,
Integer:Numeric,
},
# end Py2K
operators.truediv:{
Numeric:Numeric,
Integer:Numeric,
},
operators.add:{
Numeric:Numeric,
Integer:Numeric,
},
operators.sub:{
Numeric:Numeric,
Integer:Numeric,
}
}
class Float(Numeric):
"""A type for ``float`` numbers.
Returns Python ``float`` objects by default, applying
conversion as needed.
"""
__visit_name__ = 'float'
scale = None
def __init__(self, precision=None, asdecimal=False, **kwargs):
"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
"""
self.precision = precision
self.asdecimal = asdecimal
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(decimal.Decimal)
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul:{
Interval:Interval,
Numeric:Float,
},
# Py2K
operators.div:{
Numeric:Float,
},
# end Py2K
operators.truediv:{
Numeric:Float,
},
operators.add:{
Numeric:Float,
},
operators.sub:{
Numeric:Float,
}
}
class DateTime(_DateAffinity, TypeEngine):
"""A type for ``datetime.datetime()`` objects.
Date and time types return objects from the Python ``datetime``
module. Most DBAPIs have built in support for the datetime
module, with the noted exception of SQLite. In the case of
SQLite, date and time types are stored as strings which are then
converted back to datetime objects when rows are returned.
"""
__visit_name__ = 'datetime'
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add:{
Interval:DateTime,
},
operators.sub:{
Interval:DateTime,
DateTime:Interval,
},
}
class Date(_DateAffinity, TypeEngine):
"""A type for ``datetime.date()`` objects."""
__visit_name__ = 'date'
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add:{
Integer:Date,
Interval:DateTime,
Time:DateTime,
},
operators.sub:{
# date - integer = date
Integer:Date,
# date - date = integer.
Date:Integer,
Interval:DateTime,
# date - datetime = interval,
# this one is not in the PG docs
# but works
DateTime:Interval,
},
}
class Time(_DateAffinity, TypeEngine):
"""A type for ``datetime.time()`` objects."""
__visit_name__ = 'time'
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add:{
Date:DateTime,
Interval:Time
},
operators.sub:{
Time:Interval,
Interval:Time,
},
}
class _Binary(TypeEngine):
"""Define base behavior for binary types."""
def __init__(self, length=None):
self.length = length
# Python 3 - sqlite3 doesn't need the `Binary` conversion
# here, though pg8000 does to indicate "bytea"
def bind_processor(self, dialect):
DBAPIBinary = dialect.dbapi.Binary
def process(value):
# note: the assignment below appears intended to keep a
# reference to the type object alive within the closure
x = self
if value is not None:
return DBAPIBinary(value)
else:
return None
return process
# Python 3 has native bytes() type
# both sqlite3 and pg8000 seem to return it
# (i.e. and not 'memoryview')
# Py2K
def result_processor(self, dialect, coltype):
if util.jython:
def process(value):
if value is not None:
if isinstance(value, array.array):
return value.tostring()
return str(value)
else:
return None
else:
process = processors.to_str
return process
# end Py2K
def _coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine._coerce_compared_value` for a description."""
if isinstance(value, basestring):
return self
else:
return super(_Binary, self)._coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
class LargeBinary(_Binary):
"""A type for large binary byte data.
The Binary type generates BLOB or BYTEA when tables are created,
and also converts incoming values using the ``Binary`` callable
provided by each DB-API.
"""
__visit_name__ = 'large_binary'
def __init__(self, length=None):
"""
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
DDL statements, for those BLOB types that accept a length
(i.e. MySQL). It does *not* produce a small BINARY/VARBINARY
type - use the BINARY/VARBINARY types specifically for those.
May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
*length* for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued.
"""
_Binary.__init__(self, length=length)
class Binary(LargeBinary):
"""Deprecated. Renamed to LargeBinary."""
def __init__(self, *arg, **kw):
util.warn_deprecated('The Binary type has been renamed to '
'LargeBinary.')
LargeBinary.__init__(self, *arg, **kw)
class SchemaType(events.SchemaEventTarget):
"""Mark a type as possibly requiring schema-level DDL for usage.
Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
as well as types that are complemented by table or schema level
constraints, triggers, and other rules.
:class:`.SchemaType` classes can also be targets for the
:meth:`.DDLEvents.before_parent_attach` and :meth:`.DDLEvents.after_parent_attach`
events, where the events fire off surrounding the association of
the type object with a parent :class:`.Column`.
"""
def __init__(self, **kw):
self.name = kw.pop('name', None)
self.quote = kw.pop('quote', None)
self.schema = kw.pop('schema', None)
self.metadata = kw.pop('metadata', None)
if self.metadata:
self.metadata.append_ddl_listener('before-create',
util.portable_instancemethod(self._on_metadata_create))
self.metadata.append_ddl_listener('after-drop',
util.portable_instancemethod(self._on_metadata_drop))
def _set_parent(self, column):
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _set_table(self, column, table):
table.append_ddl_listener('before-create',
util.portable_instancemethod(
self._on_table_create))
table.append_ddl_listener('after-drop',
util.portable_instancemethod(
self._on_table_drop))
if self.metadata is None:
table.metadata.append_ddl_listener('before-create',
util.portable_instancemethod(self._on_metadata_create))
table.metadata.append_ddl_listener('after-drop',
util.portable_instancemethod(self._on_metadata_drop))
@property
def bind(self):
return self.metadata and self.metadata.bind or None
def create(self, bind=None, checkfirst=False):
"""Issue CREATE ddl for this type, if applicable."""
if bind is None:
bind = schema._bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.create(bind=bind, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue DROP ddl for this type, if applicable."""
if bind is None:
bind = schema._bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.drop(bind=bind, checkfirst=checkfirst)
def _on_table_create(self, event, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_create(event, target, bind, **kw)
def _on_table_drop(self, event, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_drop(event, target, bind, **kw)
def _on_metadata_create(self, event, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_create(event, target, bind, **kw)
def _on_metadata_drop(self, event, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_drop(event, target, bind, **kw)
class Enum(String, SchemaType):
"""Generic Enum Type.
The Enum type provides a set of possible string values which the
column is constrained to.
By default, uses the backend's native ENUM type if available,
else uses VARCHAR + a CHECK constraint.
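E.g. (a brief sketch; the ``name`` parameter is required on
backends such as Postgresql)::
    from sqlalchemy import Column
    from sqlalchemy.types import Enum
    status = Column('status', Enum('new', 'open', 'closed',
                                   name='status_types'))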
"""
__visit_name__ = 'enum'
def __init__(self, *enums, **kw):
"""Construct an enum.
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
:param \*enums: string or unicode enumeration labels. If unicode
labels are present, the `convert_unicode` flag is auto-enabled.
:param convert_unicode: Enable unicode-aware bind parameter and
result-set processing for this Enum's data. This is set
automatically based on the presence of unicode label strings.
:param metadata: Associate this type directly with a ``MetaData``
object. For types that exist on the target database as an
independent schema construct (Postgresql), this type will be
created and dropped within ``create_all()`` and ``drop_all()``
operations. If the type is not associated with any ``MetaData``
object, it will associate itself with each ``Table`` in which it is
used, and will be created when any of those individual tables are
created, after a check is performed for its existence. The type is
only dropped when ``drop_all()`` is called for that ``Table``
object's metadata, however.
:param name: The name of this type. This is required for Postgresql
and any future supported database which requires an explicitly
named type, or an explicitly named constraint in order to generate
the type and/or a table that uses it.
:param native_enum: Use the database's native ENUM type when
available. Defaults to True. When False, uses VARCHAR + check
constraint for all backends.
:param schema: Schemaname of this type. For types that exist on the
target database as an independent schema construct (Postgresql),
this parameter specifies the named schema in which the type is
present.
:param quote: Force quoting to be on or off on the type's name. If
left as the default of `None`, the usual schema-level "case
sensitive"/"reserved name" rules are used to determine if this
type's name should be quoted.
"""
self.enums = enums
self.native_enum = kw.pop('native_enum', True)
convert_unicode = kw.pop('convert_unicode', None)
if convert_unicode is None:
for e in enums:
if isinstance(e, unicode):
convert_unicode = True
break
else:
convert_unicode = False
if self.enums:
length = max(len(x) for x in self.enums)
else:
length = 0
String.__init__(self,
length=length,
convert_unicode=convert_unicode,
)
SchemaType.__init__(self, **kw)
def _should_create_constraint(self, compiler):
return not self.native_enum or \
not compiler.dialect.supports_native_enum
def _set_table(self, column, table):
if self.native_enum:
SchemaType._set_table(self, column, table)
e = schema.CheckConstraint(
column.in_(self.enums),
name=self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint)
)
table.append_constraint(e)
def adapt(self, impltype, **kw):
if issubclass(impltype, Enum):
return impltype(name=self.name,
quote=self.quote,
schema=self.schema,
metadata=self.metadata,
convert_unicode=self.convert_unicode,
native_enum=self.native_enum,
*self.enums,
**kw
)
else:
return super(Enum, self).adapt(impltype, **kw)
class PickleType(MutableType, TypeDecorator):
"""Holds Python objects, which are serialized using pickle.
PickleType builds upon the Binary type to apply Python's
``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
the way out, allowing any pickleable Python object to be stored as
a serialized binary field.
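E.g. (a minimal sketch)::
    from sqlalchemy import Column
    from sqlalchemy.types import PickleType
    # any pickleable object may be assigned to this column
    payload = Column('payload', PickleType)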
"""
impl = LargeBinary
def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
pickler=None, mutable=False, comparator=None):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to the ``cPickle`` module, or ``pickle``
if ``cPickle`` is not available. May be any object with
pickle-compatible ``dumps`` and ``loads`` methods.
:param mutable: defaults to False; implements
:meth:`AbstractType.is_mutable`. When ``True``, incoming
objects will be compared against copies of themselves
using the Python "equals" operator, unless the
``comparator`` argument is present. See
:class:`.MutableType` for details on "mutable" type
behavior. (default changed from ``True`` in
0.7.0).
.. note:: This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
:param comparator: a 2-arg callable predicate used
to compare values of this type. If left as ``None``,
the Python "equals" operator is used to compare values.
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.mutable = mutable
self.comparator = comparator
super(PickleType, self).__init__()
def __reduce__(self):
return PickleType, (self.protocol,
None,
self.mutable,
self.comparator)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
dumps = self.pickler.dumps
protocol = self.protocol
if impl_processor:
def process(value):
if value is not None:
value = dumps(value, protocol)
return impl_processor(value)
else:
def process(value):
if value is not None:
value = dumps(value, protocol)
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
loads = self.pickler.loads
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return loads(value)
else:
def process(value):
if value is None:
return None
return loads(value)
return process
def copy_value(self, value):
if self.mutable:
return self.pickler.loads(
self.pickler.dumps(value, self.protocol))
else:
return value
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
else:
return x == y
def is_mutable(self):
"""Return True if the target Python type is 'mutable'.
When this method is overridden, :meth:`copy_value` should
also be supplied. The :class:`.MutableType` mixin
is recommended as a helper.
"""
return self.mutable
class Boolean(TypeEngine, SchemaType):
"""A bool datatype.
Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on
the Python side deals in ``True`` or ``False``.
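E.g. (a minimal sketch)::
    from sqlalchemy import Column
    from sqlalchemy.types import Boolean
    # on backends without native booleans, an int/smallint plus a
    # CHECK constraint is generated instead
    is_active = Column('is_active', Boolean)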
"""
__visit_name__ = 'boolean'
def __init__(self, create_constraint=True, name=None):
"""Construct a Boolean.
:param create_constraint: defaults to True. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
"""
self.create_constraint = create_constraint
self.name = name
def _should_create_constraint(self, compiler):
return not compiler.dialect.supports_native_boolean
def _set_table(self, column, table):
if not self.create_constraint:
return
e = schema.CheckConstraint(
column.in_([0, 1]),
name=self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint)
)
table.append_constraint(e)
def bind_processor(self, dialect):
if dialect.supports_native_boolean:
return None
else:
return processors.boolean_to_int
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL, the native ``INTERVAL`` type is used; for others, the
value is stored as a date which is relative to the "epoch"
(Jan. 1, 1970).
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
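E.g. (a minimal sketch)::
    import datetime
    from sqlalchemy import Column
    from sqlalchemy.types import Interval
    duration = Column('duration', Interval)
    # bound values are datetime.timedelta() objects,
    # e.g. datetime.timedelta(days=1, hours=6)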
"""
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
def __init__(self, native=True,
second_precision=None,
day_precision=None):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently Postgresql, Oracle).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and Postgresql
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
"""
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
def adapt(self, cls, **kw):
if self.native and hasattr(cls, '_adapt_from_generic_interval'):
return cls._adapt_from_generic_interval(self, **kw)
else:
return self.__class__(
native=self.native,
second_precision=self.second_precision,
day_precision=self.day_precision,
**kw)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
def process(value):
if value is not None:
value = epoch + value
return impl_processor(value)
else:
def process(value):
if value is not None:
value = epoch + value
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return value - epoch
else:
def process(value):
if value is None:
return None
return value - epoch
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add:{
Date:DateTime,
Interval:Interval,
DateTime:DateTime,
Time:Time,
},
operators.sub:{
Interval:Interval
},
operators.mul:{
Numeric:Interval
},
operators.truediv: {
Numeric:Interval
},
# Py2K
operators.div: {
Numeric:Interval
}
# end Py2K
}
@property
def _type_affinity(self):
return Interval
def _coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine._coerce_compared_value` for a description."""
return self.impl._coerce_compared_value(op, value)
class REAL(Float):
"""The SQL REAL type."""
__visit_name__ = 'REAL'
class FLOAT(Float):
"""The SQL FLOAT type."""
__visit_name__ = 'FLOAT'
class NUMERIC(Numeric):
"""The SQL NUMERIC type."""
__visit_name__ = 'NUMERIC'
class DECIMAL(Numeric):
"""The SQL DECIMAL type."""
__visit_name__ = 'DECIMAL'
class INTEGER(Integer):
"""The SQL INT or INTEGER type."""
__visit_name__ = 'INTEGER'
INT = INTEGER
class SMALLINT(SmallInteger):
"""The SQL SMALLINT type."""
__visit_name__ = 'SMALLINT'
class BIGINT(BigInteger):
"""The SQL BIGINT type."""
__visit_name__ = 'BIGINT'
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type."""
__visit_name__ = 'TIMESTAMP'
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
class DATETIME(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = 'DATETIME'
class DATE(Date):
"""The SQL DATE type."""
__visit_name__ = 'DATE'
class TIME(Time):
"""The SQL TIME type."""
__visit_name__ = 'TIME'
class TEXT(Text):
"""The SQL TEXT type."""
__visit_name__ = 'TEXT'
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
"""
__visit_name__ = 'CLOB'
class VARCHAR(String):
"""The SQL VARCHAR type."""
__visit_name__ = 'VARCHAR'
class NVARCHAR(Unicode):
"""The SQL NVARCHAR type."""
__visit_name__ = 'NVARCHAR'
class CHAR(String):
"""The SQL CHAR type."""
__visit_name__ = 'CHAR'
class NCHAR(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = 'NCHAR'
class BLOB(LargeBinary):
"""The SQL BLOB type."""
__visit_name__ = 'BLOB'
class BINARY(_Binary):
"""The SQL BINARY type."""
__visit_name__ = 'BINARY'
class VARBINARY(_Binary):
"""The SQL VARBINARY type."""
__visit_name__ = 'VARBINARY'
class BOOLEAN(Boolean):
"""The SQL BOOLEAN type."""
__visit_name__ = 'BOOLEAN'
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
_type_map = {
str: String(),
# Py3K
#bytes : LargeBinary(),
# Py2K
unicode : Unicode(),
# end Py2K
int : Integer(),
float : Numeric(),
bool: BOOLEANTYPE,
decimal.Decimal : Numeric(),
dt.date : Date(),
dt.datetime : DateTime(),
dt.time : Time(),
dt.timedelta : Interval(),
NoneType: NULLTYPE
}
| gpl-2.0 |
nachocano/incubator-reef | dev/change_version.py | 4 | 9626 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This script changes versions in every pom.xml and relevant files.
(How to run)
python change_version.py <reef_home> <reef_version_for_pom.xml> -s <true or false> [-p]
-s option changes value of 'IsSnapshot' in lang/cs/build.props.
If you use the option "-s false", build.props changes as follows:
<IsSnapshot>false</IsSnapshot>
<SnapshotNumber>00</SnapshotNumber>
If you use "-s true", then the value of 'IsSnapshot' is changed to true.
If you use "-p", then only the "pom.xml" files are changed.
You can also see how to run the script with "python change_version.py -h"
(Example)
python change_version.py ~/reef 0.14.0 -s true
python change_version.py ~/reef 0.14.0 -s false
python change_version.py ~/reef 0.14.0 -p -s true
"""
import os
import re
import sys
import argparse
"""
Get the list of paths for every file in a directory
"""
def get_filepaths(directory):
file_paths = []
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths
"""
Change REEF version to new_version in every pom.xml
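For example (illustrative values only), the first <version>
element following "<groupId>org.apache.reef</groupId>" would
change from <version>0.13.0</version> to <version>0.14.0</version>.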
"""
def change_pom(file, new_version):
changed_str = ""
ready_to_change = False
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
if "<groupId>org.apache.reef</groupId>" in line:
ready_to_change = True
if "<version>" in line and ready_to_change:
break
changed_str += line
r = re.compile('<version>(.*?)</version>')
m = r.search(line)
old_version = m.group(1)
changed_str += line.replace(old_version, new_version)
while True:
line = f.readline()
if not line:
break
changed_str += line
f.close()
f = open(file, 'w')
f.write(changed_str)
f.close()
"""
Change JavaBridgeJarFileName in lang/cs/Org.Apache.REEF.Driver/DriverConfigGenerator.cs
"""
def change_constants_cs(file, new_version):
changed_str = ""
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
if "JavaBridgeJarFileName =" in line:
r = re.compile('"(.*?)"')
m = r.search(line)
old_version = m.group(1)
new_version = "reef-bridge-java-" + new_version + "-shaded.jar"
changed_str += line.replace(old_version, new_version)
else:
changed_str += line
f.close()
f = open(file, 'w')
f.write(changed_str)
f.close()
"""
Change version in SharedAssemblyInfo.cs and AssemblyInfo.cpp
"""
def change_assembly_info_cs(file, new_version):
changed_str = ""
new_version = new_version.split("-")[0] + ".0"
f = open(file, 'r')
r = re.compile('"(.*?)"')
while True:
line = f.readline()
if not line:
break
if ("[assembly: AssemblyVersion(" in line and "*" not in line) or ("[assembly: AssemblyFileVersion(" in line) \
or ("[assembly:AssemblyVersionAttribute(" in line and "*" not in line) \
or ("[assembly:AssemblyFileVersion(" in line):
m = r.search(line)
old_version = m.group(1)
changed_str += line.replace(old_version, new_version)
else:
changed_str += line
f.close()
f = open(file, 'w')
f.write(changed_str)
f.close()
"""
Read 'IsSnapshot' from lang/cs/build.props
"""
def read_is_snapshot(file):
f = open(file, 'r')
r = re.compile('<IsSnapshot>(.*?)</IsSnapshot>')
while True:
line = f.readline()
if not line:
break
if "<IsSnapshot>" in line and "</IsSnapshot>" in line:
m = r.search(line)
if(m.group(1)=="true"):
return True
else:
return False
f.close()
"""
Change lang/cs/build.props for the release branch
"""
def change_build_props(file, is_snapshot):
changed_str = ""
f = open(file, 'r')
r1 = re.compile('<IsSnapshot>(.*?)</IsSnapshot>')
r2 = re.compile('<SnapshotNumber>(.*?)</SnapshotNumber>')
while True:
line = f.readline()
if not line:
break
if "<IsSnapshot>" in line and "</IsSnapshot>" in line:
old_is_snapshot = r1.search(line).group(1)
changed_str += line.replace(old_is_snapshot, is_snapshot)
elif "<SnapshotNumber>" in line and "</SnapshotNumber>" in line:
old_snapshot_number = r2.search(line).group(1)
if is_snapshot=="false":
changed_str += line.replace(old_snapshot_number, "00")
else:
changed_str += line.replace(old_snapshot_number, "01")
else:
changed_str += line
f.close()
f = open(file, 'w')
f.write(changed_str)
f.close()
print file
"""
Change the name of shaded.jar in run.cmd and lang/cs/Org.Apache.REEF.Client/Properties/Resources.xml
"""
def change_shaded_jar_name(file, new_version):
changed_str = ""
f = open(file, 'r')
r1 = re.compile('reef-bridge-java-(.*?)-shaded.jar')
r2 = re.compile('reef-bridge-client-(.*?)-shaded.jar')
while True:
line = f.readline()
if not line:
break
m1 = r1.search(line)
m2 = r2.search(line)
if m1 is not None:
changed_str += line.replace(m1.group(1), new_version)
elif m2 is not None:
changed_str += line.replace(m2.group(1), new_version)
else:
changed_str += line
f.close()
f = open(file, 'w')
f.write(changed_str)
f.close()
"""
Change the version in Doxyfile
"""
def change_project_number_Doxyfile(file, new_version):
changed_str = ""
f = open(file, 'r')
while True:
line = f.readline()
if not line:
break
if "PROJECT_NUMBER = " in line:
r = re.compile('= (.*?)$')
m = r.search(line)
old_version = m.group(1)
changed_str += line.replace(old_version, new_version)
else:
changed_str += line
f.close()
f = open(file, 'w')
f.write(changed_str)
f.close()
"""
Change version of every pom.xml, SharedAssemblyInfo.cs,
AssemblyInfo.cpp, run.cmd and Resources.xml
"""
def change_version(reef_home, new_version, pom_only):
if pom_only:
for fi in get_filepaths(reef_home):
if "pom.xml" in fi:
print fi
change_pom(fi, new_version)
else:
for fi in get_filepaths(reef_home):
if "pom.xml" in fi:
print fi
change_pom(fi, new_version)
if "SharedAssemblyInfo.cs" in fi:
print fi
change_assembly_info_cs(fi, new_version)
change_assembly_info_cs(reef_home + "/lang/cs/Org.Apache.REEF.Bridge/AssemblyInfo.cpp", new_version)
print reef_home + "/lang/cs/Org.Apache.REEF.Bridge/AssemblyInfo.cpp"
change_assembly_info_cs(reef_home + "/lang/cs/Org.Apache.REEF.ClrDriver/AssemblyInfo.cpp", new_version)
print reef_home + "/lang/cs/Org.Apache.REEF.ClrDriver/AssemblyInfo.cpp"
change_constants_cs(reef_home + "/lang/cs/Org.Apache.REEF.Driver/DriverConfigGenerator.cs", new_version)
print reef_home + "/lang/cs/Org.Apache.REEF.Driver/DriverConfigGenerator.cs"
change_shaded_jar_name(reef_home + "/lang/cs/Org.Apache.REEF.Client/Properties/Resources.xml", new_version)
print reef_home + "/lang/cs/Org.Apache.REEF.Client/Properties/Resources.xml"
change_shaded_jar_name(reef_home + "/lang/cs/Org.Apache.REEF.Client/run.cmd", new_version)
print reef_home + "/lang/cs/Org.Apache.REEF.Client/run.cmd"
change_project_number_Doxyfile(reef_home + "/Doxyfile", new_version)
print reef_home + "/Doxyfile"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Script for changing REEF version in all files that use it")
parser.add_argument("reef_home", type=str, help="REEF home")
parser.add_argument("reef_version", type=str, help="REEF version")
parser.add_argument("-s", "--isSnapshot", type=str, metavar="<true or false>", help="Change 'IsSnapshot' to true or false", required=True)
parser.add_argument("-p", "--pomonly", help="Change only poms", action="store_true")
args = parser.parse_args()
reef_home = os.path.abspath(args.reef_home)
reef_version = args.reef_version
is_snapshot = args.isSnapshot
pom_only = args.pomonly
if is_snapshot is not None and not pom_only:
change_build_props(reef_home + "/lang/cs/build.props", is_snapshot)
if is_snapshot=="true":
reef_version += "-SNAPSHOT"
change_version(reef_home, reef_version, pom_only)
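# Example invocation (hypothetical script name and paths, for exposition):
#   python change_version.py ~/reef 0.16.0 -s true
# rewrites every pom.xml, AssemblyInfo and shaded-jar reference to
# 0.16.0-SNAPSHOT and sets <IsSnapshot>true</IsSnapshot> in build.props.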
| apache-2.0 |
kindersung/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/stream.py | 673 | 2748 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file exports public symbols.
"""
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import UnsupportedFrameException
from mod_pywebsocket._stream_hixie75 import StreamHixie75
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
# These methods are intended to be used by WebSocket client developers to have
# their implementations receive broken data in tests.
from mod_pywebsocket._stream_hybi import create_close_frame
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import create_ping_frame
from mod_pywebsocket._stream_hybi import create_pong_frame
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_text_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
# vi:sts=4 sw=4 et
| mpl-2.0 |
lazyuser/google-diff-match-patch | python3/diff_match_patch_test.py | 284 | 41615 | #!/usr/bin/python3
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import imp
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
imp.reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in range(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
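  # Worked example (exposition only): for
  #   diffs = [(0, "ju"), (-1, "mps"), (1, "mped")]
  # this helper returns ("jumps", "jumped") -- equalities feed both texts,
  # DIFF_DELETE (-1) only text1, and DIFF_INSERT (1) only text2.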
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEqual(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEqual(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEqual(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEqual(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEqual(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEqual(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEqual(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEqual(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEqual(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEqual(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEqual(0, self.dmp.diff_commonOverlap("fi", "\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEqual(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEqual(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEqual(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEqual(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEqual(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEqual(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEqual(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEqual(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEqual(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEqual(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEqual(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEqual(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEqual(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEqual(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(chr(x))
self.assertEqual(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEqual(n, len(chars))
lineList.insert(0, "")
self.assertEqual((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
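    # Decoding the first assertion above: "alpha\n" maps to "\x01" and
    # "beta\n" to "\x02", so "alpha\nbeta\nalpha\n" encodes as "\x01\x02\x01";
    # index 0 of the line array is an intentional placeholder.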
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEqual([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(chr(x))
self.assertEqual(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEqual(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEqual([(self.dmp.DIFF_DELETE, lines)], diffs)
def testDiffCleanupMerge(self):
# Cleanup a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
# Sentence boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
def testDiffCleanupSemantic(self):
# Cleanup semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# No overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
# Overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Reverse overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
# Two overlap eliminations.
diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
# Cleanup operationally trivial equalities.
self.dmp.Diff_EditCost = 4
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEqual([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEqual([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEqual("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEqual("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEqual("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEqual("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEqual("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEqual(diffs, self.dmp.diff_fromDelta(text1, delta))
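    # Decoding the delta: "=n" copies n characters of text1, "-n" skips n
    # characters, and "+text" inserts text (URI-encoded where necessary), so
    # text1 plus the delta round-trips back to the original diff.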
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
# Note: Python 3 can decode this.
#try:
# self.dmp.diff_fromDelta("", "+%c3xy")
# self.assertFalse(True)
#except ValueError:
# # Exception expected.
# pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, "\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, "\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, "\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEqual("\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEqual("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEqual(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEqual("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEqual("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEqual(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEqual(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEqual(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEqual(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEqual(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEqual(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEqual([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxsize))
# Timeout.
self.assertEqual([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEqual([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEqual([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEqual([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEqual([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEqual([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEqual([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEqual([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", "\u0680x\x00", False))
# Overlaps.
self.assertEqual([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEqual([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEqual([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEqual([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in range(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEqual(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEqual(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEqual(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
self.assertEqual({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEqual({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEqual(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEqual(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEqual(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEqual(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEqual(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEqual(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEqual(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEqual(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEqual(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEqual(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEqual(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEqual(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEqual(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEqual(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
self.dmp.Match_Distance = 10 # Strict location.
self.assertEqual(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEqual(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEqual(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEqual(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEqual(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEqual(3, self.dmp.match_main("abcdef", "", 3))
self.assertEqual(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEqual(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEqual(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEqual(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEqual("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEqual([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEqual(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEqual("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEqual("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEqual("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEqual(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEqual(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEqual("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEqual("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEqual("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEqual("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEqual("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEqual("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEqual(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEqual(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEqual("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEqual(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEqual("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEqual("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEqual("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEqual("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEqual("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEqual("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEqual("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEqual("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEqual(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEqual(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEqual(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEqual(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEqual(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEqual(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEqual(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
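    # Patch_DeleteThreshold governs the two "big change" cases above: a large
    # deletion is applied only when the text found in its place matches the
    # expected text within this error fraction, so loosening 0.5 to 0.6 lets
    # the heavily edited middle region still be deleted.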
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEqual(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEqual(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEqual(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
    results = self.dmp.patch_apply(patches, "")
self.assertEqual(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEqual(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEqual(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
thSoft/lilypond-hu | scripts/build/yyout2grammar.py | 8 | 5387 | #!@PYTHON@
#
# yyout2grammar.py
# This file is part of LilyPond, the GNU music typesetter.
#
# Copyright (C) 2005 by Carl D. Sorensen <c_sorensen@byu.edu>
#
# LilyPond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LilyPond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LilyPond. If not, see <http://www.gnu.org/licenses/>.
# Convert from bison output file parser.output to
# Grammar and index.
# Drops all of the state information.
# Converts \\ to \
# Eliminates the @ variables created when {} is placed
# in the middle of a rule
# Eliminates all of the c-code stuff
# Wraps lines that are longer than 78 characters for improved
# formatting
#
# to create input file, run
# bison -v parser.yy
# this will create a file parser.output
# then run
# yyout2grammar.py parser.output your_output_file
#
import sys
import re
atre = re.compile(r'(@\d+):')
intro_re = re.compile (r'(.*[:|])\s')
keyword_re = re.compile (r'(\S+)\s')
# strip extra backslashes that are inserted by the python
# string handling routines
def strip_backslash(input_string):
i=input_string.find(r'"\\')
while i > -1 :
input_string = input_string[:i+1]+input_string[i+2:]
i = input_string.find(r'"\\')
return input_string
# write an output line, adjusting to make sure that max_line_length
# is not exceeded
def write_line (output_line, output_file):
max_line_length = 78
indent_value = 3
if len(output_line) > max_line_length:
intro = intro_re.match(output_line)
if intro:
output_file.write(intro.group(1)+" ")
indent_column = len(intro.group(1))
output_line = output_line[indent_column:]
keyword = keyword_re.search(output_line)
while keyword:
output_file.write(strip_backslash(keyword.group(1))+" \n")
output_line = output_line[keyword.end(1):]
keyword = keyword_re.search(output_line)
if keyword:
output_file.write("".rjust(indent_column + indent_value))
else:
output_file.write(strip_backslash(output_line))
return
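# Illustrative trace (hypothetical grammar rule, for exposition): a long line
# like "music_list: music_list music | ..." is split after the ':' intro;
# each following token is then emitted on its own line, indented
# indent_value columns past the intro's width.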
write_me = True
if len(sys.argv)!=3:
print "Usage: yyout2grammar.py parser_output_file grammar_file."
else:
in_name = sys.argv[1]
out_name = sys.argv[2]
in_file = open(in_name,'r')
out_file = open(out_name, 'w')
at_items=[]
inline = in_file.readline()
## skip header material from file
while inline != '' and not(inline.startswith("Grammar")):
inline = in_file.readline()
## process the Grammar lines
if inline != '':
write_line(inline, out_file)
inline = in_file.readline()
while inline != '' and not(inline.startswith("Terminals")):
i = inline.find("$accept:")
if i>-1:
write_me = False
inline = in_file.readline()
atfound = re.findall(atre,inline)
if len(atfound) > 0:
at_items.extend(atfound)
# print at_items
write_me = False
inline=in_file.readline()
else:
for at_item in at_items:
i=inline.find(at_item)
## remove @ item
if i >= 0:
inline=inline[:i] + inline[i+len(at_item):]
if write_me:
write_line(inline, out_file)
inline = in_file.readline()
write_me = True
index_items = []
# Write the Terminals header line and the following blank line
write_line(inline, out_file)
inline = in_file.readline()
write_line(inline, out_file)
inline = in_file.readline()
while inline != '' and not(inline.startswith("Nonterminals")):
i=inline.find('"\\\\')
while i > -1 :
inline = inline[:i+1]+inline[i+2:]
i = inline.find('"\\\\')
index_items.append(inline)
inline = in_file.readline()
index_items.sort(lambda x,y:cmp(x.lower(),y.lower()))
for index_item in index_items:
write_line (index_item, out_file)
write_line ('\n', out_file)
# Write the Nonterminals header and the blank line
write_line(inline, out_file)
inline = in_file.readline()
write_line(inline, out_file)
index_items = []
index_item=in_file.readline()
inline=in_file.readline()
while inline != '' and not(inline.startswith("state")):
while inline.startswith(" "):
index_item = index_item + inline
inline = in_file.readline()
if not(index_item.startswith("@")) and \
not(index_item.startswith("$accept")):
index_items.append(index_item)
index_item = inline
inline=in_file.readline()
index_items.sort(lambda x,y:cmp(x.lower(),y.lower()))
for index_item in index_items:
write_line (index_item, out_file)
| gpl-3.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 12795 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if it does not exist.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
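# Tiny worked example (exposition only): build_dataset(["a", "b", "a", "c"], 3)
# keeps the 2 most frequent words plus UNK, so dictionary becomes
# {"UNK": 0, "a": 1, "b": 2}, "c" falls back to UNK, data == [1, 2, 1, 0],
# and count[0] ends up ["UNK", 1].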
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
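# Sanity check for the demo below: with skip_window=1 and num_skips=2 every
# center word is paired with both immediate neighbours, so the 8-element
# batch spans 4 consecutive centers -- on text8 that is e.g. "originated"
# paired with "anarchism" and "as", in random order.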
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy; they don't affect the calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
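    # In words: for each (center, context) pair, NCE trains a logistic
    # classifier to separate the true context word from num_sampled (64)
    # randomly drawn noise words, approximating the full 50000-way softmax at
    # a fraction of its cost.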
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
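  # Because every row of normalized_embeddings has unit length, this matmul
  # of the validation rows against all embeddings yields cosine similarities
  # directly.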
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values of session.run()).
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
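# Hedged offline sketch: once final_embeddings has been materialized above,
# nearest neighbours can be queried without a TensorFlow session. 'dictionary'
# is assumed to be the word->id mapping built earlier in this script, and
# 'three' is only an illustrative query word.
#   query = final_embeddings[dictionary['three']]
#   scores = final_embeddings.dot(query)   # cosine scores; rows are unit-norm
#   top = (-scores).argsort()[1:9]         # skip index 0, the word itself
#   print([reverse_dictionary[i] for i in top])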
| apache-2.0 |
ceibal-tatu/sugar | extensions/cpsection/updater/view.py | 4 | 14155 | # Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
from gettext import ngettext
import locale
import logging
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import Icon, CellRendererIcon
from jarabe.controlpanel.sectionview import SectionView
from model import UpdateModel
_DEBUG_VIEW_ALL = True
class ActivityUpdater(SectionView):
def __init__(self, model, alerts):
SectionView.__init__(self)
self._model = UpdateModel()
self._model.connect('progress', self.__progress_cb)
self.set_spacing(style.DEFAULT_SPACING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._top_label = Gtk.Label()
self._top_label.set_line_wrap(True)
self._top_label.set_justify(Gtk.Justification.LEFT)
self._top_label.props.xalign = 0
self.pack_start(self._top_label, False, True, 0)
self._top_label.show()
separator = Gtk.HSeparator()
self.pack_start(separator, False, True, 0)
separator.show()
bottom_label = Gtk.Label()
bottom_label.set_line_wrap(True)
bottom_label.set_justify(Gtk.Justification.LEFT)
bottom_label.props.xalign = 0
bottom_label.set_markup(
_('Software updates correct errors, eliminate security ' \
'vulnerabilities, and provide new features.'))
self.pack_start(bottom_label, False, True, 0)
bottom_label.show()
self._update_box = None
self._progress_pane = None
self._refresh()
def _switch_to_update_box(self):
if self._update_box in self.get_children():
return
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box is None:
self._update_box = UpdateBox(self._model)
self._update_box.refresh_button.connect('clicked',
self.__refresh_button_clicked_cb)
self._update_box.install_button.connect('clicked',
self.__install_button_clicked_cb)
self.pack_start(self._update_box, expand=True, fill=True, padding=0)
self._update_box.show()
def _switch_to_progress_pane(self):
if self._progress_pane in self.get_children():
return
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
if self._progress_pane is None:
self._progress_pane = ProgressPane()
self._progress_pane.cancel_button.connect('clicked',
self.__cancel_button_clicked_cb)
self.pack_start(self._progress_pane, expand=True, fill=False, padding=0)
self._progress_pane.show()
def _clear_center(self):
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
def __progress_cb(self, model, action, bundle_name, current, total):
if current == total and action == UpdateModel.ACTION_CHECKING:
self._finished_checking()
return
elif current == total:
self._finished_updating(int(current))
return
if action == UpdateModel.ACTION_CHECKING:
message = _('Checking %s...') % bundle_name
elif action == UpdateModel.ACTION_DOWNLOADING:
message = _('Downloading %s...') % bundle_name
elif action == UpdateModel.ACTION_UPDATING:
message = _('Updating %s...') % bundle_name
self._switch_to_progress_pane()
self._progress_pane.set_message(message)
self._progress_pane.set_progress(current / float(total))
def _finished_checking(self):
logging.debug('ActivityUpdater._finished_checking')
available_updates = len(self._model.updates)
if not available_updates:
top_message = _('Your software is up-to-date')
else:
top_message = ngettext('You can install %s update',
'You can install %s updates',
available_updates)
top_message = top_message % available_updates
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
if not available_updates:
self._clear_center()
else:
self._switch_to_update_box()
self._update_box.refresh()
def __refresh_button_clicked_cb(self, button):
self._refresh()
def _refresh(self):
top_message = _('Checking for updates...')
self._top_label.set_markup('<big>%s</big>' % top_message)
self._model.check_updates()
def __install_button_clicked_cb(self, button):
text = '<big>%s</big>' % _('Installing updates...')
self._top_label.set_markup(text)
self._model.update(self._update_box.get_bundles_to_update())
def __cancel_button_clicked_cb(self, button):
self._model.cancel()
def _finished_updating(self, installed_updates):
logging.debug('ActivityUpdater._finished_updating')
top_message = ngettext('%s update was installed',
'%s updates were installed', installed_updates)
top_message = top_message % installed_updates
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
self._clear_center()
def undo(self):
self._model.cancel()
class ProgressPane(Gtk.VBox):
"""Container which replaces the `ActivityPane` during refresh or
install."""
def __init__(self):
Gtk.VBox.__init__(self)
self.set_spacing(style.DEFAULT_PADDING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._progress = Gtk.ProgressBar()
self.pack_start(self._progress, True, True, 0)
self._progress.show()
self._label = Gtk.Label()
self._label.set_line_wrap(True)
self._label.set_property('xalign', 0.5)
self._label.modify_fg(Gtk.StateType.NORMAL,
style.COLOR_BUTTON_GREY.get_gdk_color())
self.pack_start(self._label, True, True, 0)
self._label.show()
alignment_box = Gtk.Alignment.new(xalign=0.5, yalign=0.5,
xscale=0, yscale=0)
self.pack_start(alignment_box, True, True, 0)
alignment_box.show()
self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CANCEL)
alignment_box.add(self.cancel_button)
self.cancel_button.show()
def set_message(self, message):
self._label.set_text(message)
def set_progress(self, fraction):
self._progress.props.fraction = fraction
class UpdateBox(Gtk.VBox):
def __init__(self, model):
Gtk.VBox.__init__(self)
self._model = model
self.set_spacing(style.DEFAULT_PADDING)
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.pack_start(scrolled_window, True, True, 0)
scrolled_window.show()
self._update_list = UpdateList(model)
self._update_list.props.model.connect('row-changed',
self.__row_changed_cb)
scrolled_window.add(self._update_list)
self._update_list.show()
bottom_box = Gtk.HBox()
bottom_box.set_spacing(style.DEFAULT_SPACING)
self.pack_start(bottom_box, False, True, 0)
bottom_box.show()
self._size_label = Gtk.Label()
self._size_label.props.xalign = 0
self._size_label.set_justify(Gtk.Justification.LEFT)
bottom_box.pack_start(self._size_label, True, True, 0)
self._size_label.show()
self.refresh_button = Gtk.Button(stock=Gtk.STOCK_REFRESH)
bottom_box.pack_start(self.refresh_button, False, True, 0)
self.refresh_button.show()
self.install_button = Gtk.Button(_('Install selected'))
self.install_button.props.image = Icon(icon_name='emblem-downloads',
icon_size=Gtk.IconSize.BUTTON)
bottom_box.pack_start(self.install_button, False, True, 0)
self.install_button.show()
self._update_total_size_label()
def refresh(self):
self._update_list.refresh()
def __row_changed_cb(self, list_model, path, iterator):
self._update_total_size_label()
self._update_install_button()
def _update_total_size_label(self):
total_size = 0
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
total_size += row[UpdateListModel.SIZE]
markup = _('Download size: %s') % _format_size(total_size)
self._size_label.set_markup(markup)
def _update_install_button(self):
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
self.install_button.props.sensitive = True
return
self.install_button.props.sensitive = False
def get_bundles_to_update(self):
bundles_to_update = []
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
bundles_to_update.append(row[UpdateListModel.BUNDLE_ID])
return bundles_to_update
class UpdateList(Gtk.TreeView):
def __init__(self, model):
list_model = UpdateListModel(model)
Gtk.TreeView.__init__(self, list_model)
self.set_reorderable(False)
self.set_enable_search(False)
self.set_headers_visible(False)
toggle_renderer = Gtk.CellRendererToggle()
toggle_renderer.props.activatable = True
toggle_renderer.props.xpad = style.DEFAULT_PADDING
toggle_renderer.props.indicator_size = style.zoom(26)
toggle_renderer.connect('toggled', self.__toggled_cb)
toggle_column = Gtk.TreeViewColumn()
toggle_column.pack_start(toggle_renderer, True)
toggle_column.add_attribute(toggle_renderer, 'active',
UpdateListModel.SELECTED)
self.append_column(toggle_column)
icon_renderer = CellRendererIcon(self)
icon_renderer.props.width = style.STANDARD_ICON_SIZE
icon_renderer.props.height = style.STANDARD_ICON_SIZE
icon_renderer.props.size = style.STANDARD_ICON_SIZE
icon_renderer.props.xpad = style.DEFAULT_PADDING
icon_renderer.props.ypad = style.DEFAULT_PADDING
icon_renderer.props.stroke_color = style.COLOR_TOOLBAR_GREY.get_svg()
icon_renderer.props.fill_color = style.COLOR_TRANSPARENT.get_svg()
icon_column = Gtk.TreeViewColumn()
icon_column.pack_start(icon_renderer, True)
icon_column.add_attribute(icon_renderer, 'file-name',
UpdateListModel.ICON_FILE_NAME)
self.append_column(icon_column)
text_renderer = Gtk.CellRendererText()
description_column = Gtk.TreeViewColumn()
description_column.pack_start(text_renderer, True)
description_column.add_attribute(text_renderer, 'markup',
UpdateListModel.DESCRIPTION)
self.append_column(description_column)
def __toggled_cb(self, cell_renderer, path):
row = self.props.model[path]
row[UpdateListModel.SELECTED] = not row[UpdateListModel.SELECTED]
def refresh(self):
pass
class UpdateListModel(Gtk.ListStore):
BUNDLE_ID = 0
SELECTED = 1
ICON_FILE_NAME = 2
DESCRIPTION = 3
SIZE = 4
def __init__(self, model):
Gtk.ListStore.__init__(self, str, bool, str, str, int)
for bundle_update in model.updates:
row = [None] * 5
row[self.BUNDLE_ID] = bundle_update.bundle.get_bundle_id()
row[self.SELECTED] = True
row[self.ICON_FILE_NAME] = bundle_update.bundle.get_icon()
details = _('From version %(current)s to %(new)s (Size: %(size)s)')
details = details % \
{'current': bundle_update.bundle.get_activity_version(),
'new': bundle_update.version,
'size': _format_size(bundle_update.size)}
row[self.DESCRIPTION] = '<b>%s</b>\n%s' % \
(bundle_update.bundle.get_name(), details)
row[self.SIZE] = bundle_update.size
self.append(row)
def _format_size(size):
"""Convert a given size in bytes to a nicer better readable unit"""
if size == 0:
# TRANS: download size is 0
return _('None')
elif size < 1024:
# TRANS: download size of very small updates
return _('1 KB')
elif size < 1024 * 1024:
# TRANS: download size of small updates, e.g. '250 KB'
return locale.format_string(_('%.0f KB'), size / 1024.0)
else:
# TRANS: download size of updates, e.g. '2.3 MB'
return locale.format_string(_('%.1f MB'), size / 1024.0 / 1024)
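# Hedged examples of _format_size output (the exact strings depend on the
# active locale and installed translations; these assume an untranslated
# C locale):
#   _format_size(0)                # 'None'
#   _format_size(500)              # '1 KB' (sub-KB sizes are shown as 1 KB)
#   _format_size(2048)             # '2 KB'
#   _format_size(5 * 1024 * 1024)  # '5.0 MB'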
| gpl-2.0 |
ychen820/microblog | y/google-cloud-sdk/lib/pyasn1/type/base.py | 35 | 9405 | # Base classes for ASN.1 types
import sys
from pyasn1.type import constraint, tagmap
from pyasn1 import error
class Asn1Item: pass
class Asn1ItemBase(Asn1Item):
# Set of tags for this ASN.1 type
tagSet = ()
# A list of constraint.Constraint instances for checking values
subtypeSpec = constraint.ConstraintsIntersection()
# Used for ambiguous ASN.1 types identification
typeId = None
def __init__(self, tagSet=None, subtypeSpec=None):
if tagSet is None:
self._tagSet = self.tagSet
else:
self._tagSet = tagSet
if subtypeSpec is None:
self._subtypeSpec = self.subtypeSpec
else:
self._subtypeSpec = subtypeSpec
def _verifySubtypeSpec(self, value, idx=None):
try:
self._subtypeSpec(value, idx)
except error.PyAsn1Error:
c, i, t = sys.exc_info()
raise c('%s at %s' % (i, self.__class__.__name__))
def getSubtypeSpec(self): return self._subtypeSpec
def getTagSet(self): return self._tagSet
def getEffectiveTagSet(self): return self._tagSet # used by untagged types
def getTagMap(self): return tagmap.TagMap({self._tagSet: self})
def isSameTypeWith(self, other):
return self is other or \
self._tagSet == other.getTagSet() and \
self._subtypeSpec == other.getSubtypeSpec()
def isSuperTypeOf(self, other):
"""Returns true if argument is a ASN1 subtype of ourselves"""
return self._tagSet.isSuperTagSetOf(other.getTagSet()) and \
self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec())
class __NoValue:
def __getattr__(self, attr):
raise error.PyAsn1Error('No value for %s()' % attr)
def __getitem__(self, i):
raise error.PyAsn1Error('No value')
noValue = __NoValue()
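# Hedged demonstration of the noValue sentinel defined above: any attribute
# access or indexing raises PyAsn1Error instead of silently yielding data.
#   noValue.tagSet   # raises error.PyAsn1Error('No value for tagSet()')
#   noValue[0]       # raises error.PyAsn1Error('No value')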
# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
defaultValue = noValue
def __init__(self, value=None, tagSet=None, subtypeSpec=None):
Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
if value is None or value is noValue:
value = self.defaultValue
if value is None or value is noValue:
self.__hashedValue = value = noValue
else:
value = self.prettyIn(value)
self._verifySubtypeSpec(value)
self.__hashedValue = hash(value)
self._value = value
self._len = None
def __repr__(self):
if self._value is noValue:
return self.__class__.__name__ + '()'
else:
return self.__class__.__name__ + '(%s)' % (self.prettyOut(self._value),)
def __str__(self): return str(self._value)
def __eq__(self, other):
return self is other and True or self._value == other
def __ne__(self, other): return self._value != other
def __lt__(self, other): return self._value < other
def __le__(self, other): return self._value <= other
def __gt__(self, other): return self._value > other
def __ge__(self, other): return self._value >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._value)
else:
def __bool__(self): return bool(self._value)
def __hash__(self): return self.__hashedValue
def clone(self, value=None, tagSet=None, subtypeSpec=None):
if value is None and tagSet is None and subtypeSpec is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
return self.__class__(value, tagSet, subtypeSpec)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
return self.__class__(value, tagSet, subtypeSpec)
def prettyIn(self, value): return value
def prettyOut(self, value): return str(value)
def prettyPrint(self, scope=0): return self.prettyOut(self._value)
# XXX Compatibility stub
def prettyPrinter(self, scope=0): return self.prettyPrint(scope)
#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
# type is Python sequence. Additional type-specific addressing methods
# may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
# components
# * Constructed types hold a reference to component types used for value
# verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
# of types for Sequence/Set/Choice.
#
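# Hedged usage sketch of the by-position data model described above (it
# assumes a concrete constructed type such as pyasn1.type.univ.Sequence,
# which is not defined in this module):
#   seq.setComponentByPosition(0, some_value)
#   seq[0]        # __getitem__ -> getComponentByPosition(0)
#   seq[0] = v    # __setitem__ -> setComponentByPosition(0, v)
#   len(seq)      # number of component values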
class AbstractConstructedAsn1Item(Asn1ItemBase):
componentType = None
sizeSpec = constraint.ConstraintsIntersection()
def __init__(self, componentType=None, tagSet=None,
subtypeSpec=None, sizeSpec=None):
Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
if componentType is None:
self._componentType = self.componentType
else:
self._componentType = componentType
if sizeSpec is None:
self._sizeSpec = self.sizeSpec
else:
self._sizeSpec = sizeSpec
self._componentValues = []
self._componentValuesSet = 0
def __repr__(self):
r = self.__class__.__name__ + '()'
for idx in range(len(self._componentValues)):
if self._componentValues[idx] is None:
continue
r = r + '.setComponentByPosition(%s, %r)' % (
idx, self._componentValues[idx]
)
return r
def __eq__(self, other):
return self is other and True or self._componentValues == other
def __ne__(self, other): return self._componentValues != other
def __lt__(self, other): return self._componentValues < other
def __le__(self, other): return self._componentValues <= other
def __gt__(self, other): return self._componentValues > other
def __ge__(self, other): return self._componentValues >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._componentValues)
else:
def __bool__(self): return bool(self._componentValues)
def getComponentTagMap(self):
raise error.PyAsn1Error('Method not implemented')
def _cloneComponentValues(self, myClone, cloneValueFlag): pass
def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None,
cloneValueFlag=None):
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if sizeSpec is None:
sizeSpec = self._sizeSpec
r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
if cloneValueFlag:
self._cloneComponentValues(r, cloneValueFlag)
return r
def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None,
sizeSpec=None, cloneValueFlag=None):
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if sizeSpec is None:
sizeSpec = self._sizeSpec
else:
sizeSpec = sizeSpec + self._sizeSpec
r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
if cloneValueFlag:
self._cloneComponentValues(r, cloneValueFlag)
return r
def _verifyComponent(self, idx, value): pass
def verifySizeSpec(self): self._sizeSpec(self)
def getComponentByPosition(self, idx):
raise error.PyAsn1Error('Method not implemented')
def setComponentByPosition(self, idx, value, verifyConstraints=True):
raise error.PyAsn1Error('Method not implemented')
def getComponentType(self): return self._componentType
def __getitem__(self, idx): return self.getComponentByPosition(idx)
def __setitem__(self, idx, value): self.setComponentByPosition(idx, value)
def __len__(self): return len(self._componentValues)
def clear(self):
self._componentValues = []
self._componentValuesSet = 0
def setDefaultComponents(self): pass
| bsd-3-clause |
lduarte1991/edx-platform | lms/djangoapps/survey/tests/test_models.py | 10 | 9801 | """
Python tests for the Survey models
"""
from collections import OrderedDict
import ddt
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.test.client import Client
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
from survey.models import SurveyAnswer, SurveyForm
@ddt.ddt
class SurveyModelsTests(TestCase):
"""
All tests for the Survey models.py file
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super(SurveyModelsTests, self).setUp()
self.client = Client()
# Create two accounts
self.password = 'abc'
self.student = User.objects.create_user('student', 'student@test.com', self.password)
self.student2 = User.objects.create_user('student2', 'student2@test.com', self.password)
self.test_survey_name = 'TestForm'
self.test_form = '<li><input name="field1" /></li><li><input name="field2" /></li><li><select name="ddl"><option>1</option></select></li>'
self.test_form_update = '<input name="field1" />'
self.course_id = 'foo/bar/baz'
self.student_answers = OrderedDict({
'field1': 'value1',
'field2': 'value2',
})
self.student_answers_update = OrderedDict({
'field1': 'value1-updated',
'field2': 'value2-updated',
})
self.student_answers_update2 = OrderedDict({
'field1': 'value1-updated2',
})
self.student2_answers = OrderedDict({
'field1': 'value3'
})
def _create_test_survey(self):
"""
Helper method to set up test form
"""
return SurveyForm.create(self.test_survey_name, self.test_form)
def test_form_not_found_raise_exception(self):
"""
        Asserts that looking up a form that does not exist raises an exception
"""
with self.assertRaises(SurveyFormNotFound):
SurveyForm.get(self.test_survey_name)
def test_form_not_found_none(self):
"""
        Asserts that looking up a form that does not exist returns None when
        throw_if_not_found is False
"""
self.assertIsNone(SurveyForm.get(self.test_survey_name, throw_if_not_found=False))
def test_create_new_form(self):
"""
        Make sure we can create a new form and look it up
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
new_survey = SurveyForm.get(self.test_survey_name)
self.assertIsNotNone(new_survey)
self.assertEqual(new_survey.form, self.test_form)
def test_unicode_rendering(self):
"""
See if the survey form returns the expected unicode string
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
self.assertEquals(unicode(survey), self.test_survey_name)
def test_create_form_with_malformed_html(self):
"""
Make sure that if a SurveyForm is saved with unparseable html
an exception is thrown
"""
with self.assertRaises(ValidationError):
SurveyForm.create('badform', '<input name="oops" /><<<>')
def test_create_form_with_no_fields(self):
"""
Make sure that if a SurveyForm is saved without any named fields
an exception is thrown
"""
with self.assertRaises(ValidationError):
SurveyForm.create('badform', '<p>no input fields here</p>')
with self.assertRaises(ValidationError):
SurveyForm.create('badform', '<input id="input_without_name" />')
def test_create_form_already_exists(self):
"""
Make sure we can't create two surveys of the same name
"""
self._create_test_survey()
with self.assertRaises(SurveyFormNameAlreadyExists):
self._create_test_survey()
def test_create_form_update_existing(self):
"""
Make sure we can update an existing form
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey = SurveyForm.create(self.test_survey_name, self.test_form_update, update_if_exists=True)
self.assertIsNotNone(survey)
survey = SurveyForm.get(self.test_survey_name)
self.assertIsNotNone(survey)
self.assertEquals(survey.form, self.test_form_update)
def test_survey_has_no_answers(self):
"""
Create a new survey and assert that there are no answers to that survey
"""
survey = self._create_test_survey()
self.assertEquals(len(survey.get_answers()), 0)
def test_user_has_no_answers(self):
"""
Create a new survey with no answers in it and check that a user is determined to not have answered it
"""
survey = self._create_test_survey()
self.assertFalse(survey.has_user_answered_survey(self.student))
self.assertEquals(len(survey.get_answers()), 0)
@ddt.data(None, 'foo/bar/baz')
def test_single_user_answers(self, course_id):
"""
Create a new survey and add answers to it
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, course_id)
self.assertTrue(survey.has_user_answered_survey(self.student))
all_answers = survey.get_answers()
self.assertEquals(len(all_answers.keys()), 1)
self.assertIn(self.student.id, all_answers)
self.assertEquals(all_answers[self.student.id], self.student_answers)
answers = survey.get_answers(self.student)
self.assertEquals(len(answers.keys()), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(all_answers[self.student.id], self.student_answers)
# check that the course_id was set
answer_objs = SurveyAnswer.objects.filter(
user=self.student,
form=survey
)
for answer_obj in answer_objs:
if course_id:
self.assertEquals(unicode(answer_obj.course_key), course_id)
else:
self.assertIsNone(answer_obj.course_key)
def test_multiple_user_answers(self):
"""
Create a new survey and add answers to it
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
self.assertTrue(survey.has_user_answered_survey(self.student))
all_answers = survey.get_answers()
self.assertEquals(len(all_answers.keys()), 2)
self.assertIn(self.student.id, all_answers)
self.assertIn(self.student2.id, all_answers)
self.assertEquals(all_answers[self.student.id], self.student_answers)
self.assertEquals(all_answers[self.student2.id], self.student2_answers)
answers = survey.get_answers(self.student)
self.assertEquals(len(answers.keys()), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers)
answers = survey.get_answers(self.student2)
self.assertEquals(len(answers.keys()), 1)
self.assertIn(self.student2.id, answers)
self.assertEquals(answers[self.student2.id], self.student2_answers)
def test_update_answers(self):
"""
Make sure the update case works
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
answers = survey.get_answers(self.student)
self.assertEquals(len(answers.keys()), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers)
# update
survey.save_user_answers(self.student, self.student_answers_update, self.course_id)
answers = survey.get_answers(self.student)
self.assertEquals(len(answers.keys()), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers_update)
        # update with just a subset of the original dataset
survey.save_user_answers(self.student, self.student_answers_update2, self.course_id)
answers = survey.get_answers(self.student)
self.assertEquals(len(answers.keys()), 1)
self.assertIn(self.student.id, answers)
self.assertEquals(answers[self.student.id], self.student_answers_update2)
def test_limit_num_users(self):
"""
Verify that the limit_num_users parameter to get_answers()
works as intended
"""
survey = self._create_test_survey()
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
# even though we have 2 users submitted answers
# limit the result set to just 1
all_answers = survey.get_answers(limit_num_users=1)
self.assertEquals(len(all_answers.keys()), 1)
def test_get_field_names(self):
"""
Create a new survey and add answers to it
"""
survey = self._create_test_survey()
self.assertIsNotNone(survey)
survey.save_user_answers(self.student, self.student_answers, self.course_id)
survey.save_user_answers(self.student2, self.student2_answers, self.course_id)
names = survey.get_field_names()
self.assertEqual(sorted(names), ['ddl', 'field1', 'field2'])
| agpl-3.0 |
MyAOSP/external_chromium_org | tools/telemetry/telemetry/page/actions/media_action.py | 24 | 1749 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common media action functions."""
import logging
import os
from telemetry.core import util
from telemetry.page.actions import page_action
class MediaAction(page_action.PageAction):
def WillRunAction(self, page, tab):
"""Loads the common media action JS code prior to running the action."""
self.LoadJS(tab, 'media_action.js')
def RunAction(self, page, tab, previous_action):
super(MediaAction, self).RunAction(page, tab, previous_action)
def LoadJS(self, tab, js_file_name):
"""Loads and executes a JS file in the tab."""
with open(os.path.join(os.path.dirname(__file__), js_file_name)) as f:
js = f.read()
tab.ExecuteJavaScript(js)
def WaitForEvent(self, tab, selector, event_name, timeout):
"""Halts media action until the selector's event is fired.
Args:
tab: The tab to check for event on.
selector: Media element selector.
event_name: Name of the event to check if fired or not.
timeout: Timeout to check for event, throws an exception if not fired.
"""
util.WaitFor(lambda:
self.HasEventCompletedOrError(tab, selector, event_name),
timeout=timeout)
def HasEventCompletedOrError(self, tab, selector, event_name):
if tab.EvaluateJavaScript(
'window.__hasEventCompleted("%s", "%s");' % (selector, event_name)):
return True
error = tab.EvaluateJavaScript('window.__error')
if error:
logging.error('Detected media error while waiting for %s: %s', event_name,
error)
return True
return False
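# Hedged usage sketch (assumes a MediaAction subclass running against a tab
# whose page contains the selected media element; names are illustrative):
#   self.WaitForEvent(tab, selector='#video_1', event_name='ended', timeout=60)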
| bsd-3-clause |
vrenaville/OCB | addons/hw_posbox_upgrade/controllers/main.py | 172 | 4161 | # -*- coding: utf-8 -*-
import logging
import os
import time
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import threading
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
upgrade_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox - Software Upgrade</title>
<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
$(function(){
var upgrading = false;
$('#upgrade').click(function(){
console.log('click');
if(!upgrading){
upgrading = true;
$('#upgrade').text('Upgrading, Please Wait');
$.ajax({
url:'/hw_proxy/perform_upgrade/'
}).then(function(status){
$('#upgrade').html('Upgrade Successful<br \\>Click to Restart the PosBox');
$('#upgrade').off('click');
$('#upgrade').click(function(){
$.ajax({ url:'/hw_proxy/perform_restart' })
$('#upgrade').text('Restarting');
$('#upgrade').off('click');
setTimeout(function(){
window.location = '/'
},30*1000);
});
},function(){
$('#upgrade').text('Upgrade Failed');
});
}
});
});
</script>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.centering{
text-align: center;
}
#upgrade {
padding: 20px;
background: rgb(121, 197, 107);
color: white;
border-radius: 3px;
text-align: center;
margin: 30px;
text-decoration: none;
display: inline-block;
}
</style>
</head>
<body>
<h1>PosBox Software Upgrade</h1>
<p>
This tool will help you perform an upgrade of the PosBox's software.
        However, the preferred method to upgrade the PosBox is to flash the SD card with
        the <a href='http://nightly.openerp.com/trunk/posbox/'>latest image</a>. The upgrade
        procedure is explained in the <a href='/hw_proxy/static/doc/manual.pdf'>PosBox manual</a>.
</p>
<p>
        To upgrade the PosBox, click on the upgrade button. The upgrade will take a few minutes. <b>Do not reboot</b> the PosBox during the upgrade.
</p>
<div class='centering'>
<a href='#' id='upgrade'>Upgrade</a>
</div>
</body>
</html>
"""
class PosboxUpgrader(hw_proxy.Proxy):
def __init__(self):
super(PosboxUpgrader,self).__init__()
self.upgrading = threading.Lock()
self.last_upgrade = 0
@http.route('/hw_proxy/upgrade', type='http', auth='none', )
def upgrade(self):
return upgrade_template
@http.route('/hw_proxy/perform_upgrade', type='http', auth='none')
def perform_upgrade(self):
self.upgrading.acquire()
if time.time() - self.last_upgrade < 30:
self.upgrading.release()
return 'UPTODATE'
else:
os.system('/bin/bash /home/pi/openerp/update.sh')
self.last_upgrade = time.time()
self.upgrading.release()
return 'SUCCESS'
@http.route('/hw_proxy/perform_restart', type='http', auth='none')
def perform_restart(self):
self.upgrading.acquire()
if time.time() - self.last_upgrade < 30:
self.upgrading.release()
return 'RESTARTED'
else:
os.system('/bin/bash /home/pi/openerp/restart.sh')
self.last_upgrade = time.time()
self.upgrading.release()
return 'SUCCESS'
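# Hedged client-side sketch of the HTTP endpoints exposed above (host and
# port are illustrative; substitute the real PosBox address):
#   import requests
#   requests.get('http://<posbox-ip>:8069/hw_proxy/perform_upgrade')
#   requests.get('http://<posbox-ip>:8069/hw_proxy/perform_restart')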
| agpl-3.0 |
frishberg/django | tests/template_tests/filter_tests/test_timeuntil.py | 12 | 4457 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timeuntil_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeuntilTests(TimezoneTestCase):
# Default compare with datetime.now()
@setup({'timeuntil01': '{{ a|timeuntil }}'})
def test_timeuntil01(self):
output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)})
self.assertEqual(output, '2\xa0minutes')
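        # Note: the filter joins value and unit with a non-breaking space
        # ('\xa0'), which is why every expected string below uses it.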
@setup({'timeuntil02': '{{ a|timeuntil }}'})
def test_timeuntil02(self):
output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))})
self.assertEqual(output, '1\xa0day')
@setup({'timeuntil03': '{{ a|timeuntil }}'})
def test_timeuntil03(self):
output = self.engine.render_to_string(
'timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))}
)
self.assertEqual(output, '8\xa0hours, 10\xa0minutes')
# Compare to a given parameter
@setup({'timeuntil04': '{{ a|timeuntil:b }}'})
def test_timeuntil04(self):
output = self.engine.render_to_string(
'timeuntil04',
{'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)},
)
self.assertEqual(output, '1\xa0day')
@setup({'timeuntil05': '{{ a|timeuntil:b }}'})
def test_timeuntil05(self):
output = self.engine.render_to_string(
'timeuntil05',
{'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)},
)
self.assertEqual(output, '1\xa0minute')
# Regression for #7443
@setup({'timeuntil06': '{{ earlier|timeuntil }}'})
def test_timeuntil06(self):
output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil07': '{{ earlier|timeuntil:now }}'})
def test_timeuntil07(self):
output = self.engine.render_to_string(
'timeuntil07', {'now': self.now, 'earlier': self.now - timedelta(days=7)}
)
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil08': '{{ later|timeuntil }}'})
def test_timeuntil08(self):
output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)})
self.assertEqual(output, '1\xa0week')
@setup({'timeuntil09': '{{ later|timeuntil:now }}'})
def test_timeuntil09(self):
output = self.engine.render_to_string('timeuntil09', {'now': self.now, 'later': self.now + timedelta(days=7)})
self.assertEqual(output, '1\xa0week')
# Differing timezones are calculated correctly.
@requires_tz_support
@setup({'timeuntil10': '{{ a|timeuntil }}'})
def test_timeuntil10(self):
output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timeuntil11': '{{ a|timeuntil }}'})
def test_timeuntil11(self):
output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil12': '{{ a|timeuntil:b }}'})
def test_timeuntil12(self):
output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
# Regression for #9065 (two date objects).
@setup({'timeuntil13': '{{ a|timeuntil:b }}'})
def test_timeuntil13(self):
output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil14': '{{ a|timeuntil:b }}'})
def test_timeuntil14(self):
output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)})
self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
def test_until_now(self):
self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day')
def test_explicit_date(self):
self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day')
| bsd-3-clause |
spirrello/spirrello-pynet-work | applied_python/lib/python2.7/site-packages/pexpect/async.py | 14 | 2151 | import asyncio
import errno
from pexpect import EOF
@asyncio.coroutine
def expect_async(expecter, timeout=None):
    # First process data that was previously read - if it matches, we don't need
# async stuff.
previously_read = expecter.spawn.buffer
expecter.spawn.buffer = expecter.spawn.string_type()
idx = expecter.new_data(previously_read)
if idx is not None:
return idx
transport, pw = yield from asyncio.get_event_loop()\
.connect_read_pipe(lambda: PatternWaiter(expecter), expecter.spawn)
try:
return (yield from asyncio.wait_for(pw.fut, timeout))
except asyncio.TimeoutError as e:
transport.pause_reading()
return expecter.timeout(e)
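# Hedged usage sketch: expect_async is a coroutine, so a caller drives it
# with an event loop. The expecter object is normally built by pexpect
# internals; it is assumed to exist here.
#   loop = asyncio.get_event_loop()
#   index = loop.run_until_complete(expect_async(expecter, timeout=30))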
class PatternWaiter(asyncio.Protocol):
def __init__(self, expecter):
self.expecter = expecter
self.fut = asyncio.Future()
def found(self, result):
if not self.fut.done():
self.fut.set_result(result)
def error(self, exc):
if not self.fut.done():
self.fut.set_exception(exc)
def data_received(self, data):
spawn = self.expecter.spawn
s = spawn._decoder.decode(data)
spawn._log(s, 'read')
if self.fut.done():
spawn.buffer += data
return
try:
index = self.expecter.new_data(data)
if index is not None:
# Found a match
self.found(index)
except Exception as e:
self.expecter.errored()
self.error(e)
def eof_received(self):
# N.B. If this gets called, async will close the pipe (the spawn object)
# for us
try:
self.expecter.spawn.flag_eof = True
index = self.expecter.eof()
except EOF as e:
self.error(e)
else:
self.found(index)
def connection_lost(self, exc):
if isinstance(exc, OSError) and exc.errno == errno.EIO:
            # We may get here without eof_received being called, e.g. on Linux
self.eof_received()
elif exc is not None:
self.error(exc)
| gpl-3.0 |
mogoweb/chromium-crosswalk | tools/site_compare/commands/measure.py | 189 | 1573 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command for measuring how long pages take to load in a browser.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
import win32process
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
"""Inserts the command and arguments into a command line for parsing."""
cmd = cmdline.AddCommand(
["measure"],
"Measures how long a series of URLs takes to load in one or more browsers.",
None,
ExecuteMeasure)
browser_iterate.SetupIterationCommandLine(cmd)
cmd.AddArgument(
["-log", "--logfile"], "File to write output", type="string", required=True)
def ExecuteMeasure(command):
"""Executes the Measure command."""
def LogResult(url, proc, wnd, result):
"""Write the result of the browse to the log file."""
log_file.write(result)
log_file = open(command["--logfile"], "w")
browser_iterate.Iterate(command, LogResult)
# Close the log file and return. We're done.
log_file.close()
| bsd-3-clause |
toddeye/home-assistant | homeassistant/components/notify/instapush.py | 7 | 2855 | """
homeassistant.components.notify.instapush
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instapush notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.instapush/
"""
import logging
import json
import requests
from homeassistant.helpers import validate_config
from homeassistant.components.notify import (
DOMAIN, ATTR_TITLE, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://api.instapush.im/v1/'
def get_service(hass, config):
""" Get the instapush notification service. """
if not validate_config({DOMAIN: config},
{DOMAIN: [CONF_API_KEY,
'app_secret',
'event',
'tracker']},
_LOGGER):
return None
headers = {'x-instapush-appid': config[CONF_API_KEY],
'x-instapush-appsecret': config['app_secret']}
try:
response = requests.get(_RESOURCE + 'events/list',
headers=headers).json()
except ValueError:
_LOGGER.error('Unexpected answer from Instapush API.')
return None
if 'error' in response:
_LOGGER.error(response['msg'])
return None
if len([app for app in response if app['title'] == config['event']]) == 0:
_LOGGER.error(
"No app match your given value. "
"Please create an app at https://instapush.im")
return None
return InstapushNotificationService(
config[CONF_API_KEY], config['app_secret'], config['event'],
config['tracker'])
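# Hedged configuration sketch for this platform (a configuration.yaml entry;
# the keys mirror the validate_config() requirements above, and the values
# are placeholders):
#   notify:
#     platform: instapush
#     api_key: YOUR_APP_ID
#     app_secret: YOUR_APP_SECRET
#     event: YOUR_EVENT
#     tracker: YOUR_TRACKER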
# pylint: disable=too-few-public-methods
class InstapushNotificationService(BaseNotificationService):
""" Implements notification service for Instapush. """
def __init__(self, api_key, app_secret, event, tracker):
self._api_key = api_key
self._app_secret = app_secret
self._event = event
self._tracker = tracker
self._headers = {
'x-instapush-appid': self._api_key,
'x-instapush-appsecret': self._app_secret,
'Content-Type': 'application/json'}
def send_message(self, message="", **kwargs):
""" Send a message to a user. """
title = kwargs.get(ATTR_TITLE)
data = {"event": self._event,
"trackers": {self._tracker: title + " : " + message}}
response = requests.post(_RESOURCE + 'post', data=json.dumps(data),
headers=self._headers)
if response.json()['status'] == 401:
_LOGGER.error(
response.json()['msg'],
"Please check your details at https://instapush.im/")
| mit |
teltek/edx-platform | lms/djangoapps/notifier_api/views.py | 23 | 1576 | from django.contrib.auth.models import User
from rest_framework import pagination
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from notification_prefs import NOTIFICATION_PREF_KEY
from notifier_api.serializers import NotifierUserSerializer
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
class NotifierPaginator(pagination.PageNumberPagination):
"""
Paginator for the notifier API.
"""
page_size = 10
page_size_query_param = "page_size"
def get_paginated_response(self, data):
"""
Construct a response with pagination information.
"""
return Response({
'next': self.get_next_link(),
'previous': self.get_previous_link(),
'count': self.page.paginator.count,
'results': data
})
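    # Hedged example of the payload shape produced above (values are
    # illustrative):
    #   {"next": ".../?page=2", "previous": None, "count": 25,
    #    "results": [<serialized users>]}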
class NotifierUsersViewSet(ReadOnlyModelViewSet):
"""
An endpoint that the notifier can use to retrieve users who have enabled
daily forum digests, including all information that the notifier needs about
such users.
"""
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = NotifierUserSerializer
pagination_class = NotifierPaginator
# See NotifierUserSerializer for notes about related tables
queryset = User.objects.filter(
preferences__key=NOTIFICATION_PREF_KEY
).select_related(
"profile"
).prefetch_related(
"preferences",
"courseenrollment_set",
"course_groups",
"roles__permissions"
)
| agpl-3.0 |
wbhuber/local_swift_branch | test/unit/account/test_replicator.py | 18 | 5744 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
import shutil
from swift.account import replicator, backend, server
from swift.common.utils import normalize_timestamp
from swift.common.storage_policy import POLICIES
from test.unit.common import test_db_replicator
class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
backend = backend.AccountBroker
datadir = server.DATADIR
replicator_daemon = replicator.AccountReplicator
def test_sync(self):
broker = self._get_broker('a', node_index=0)
put_timestamp = normalize_timestamp(time.time())
broker.initialize(put_timestamp)
# "replicate" to same database
daemon = replicator.AccountReplicator({})
part, node = self._get_broker_part_node(broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
# nothing to do
self.assertTrue(success)
self.assertEqual(1, daemon.stats['no_change'])
def test_sync_remote_missing(self):
broker = self._get_broker('a', node_index=0)
put_timestamp = time.time()
broker.initialize(put_timestamp)
# "replicate" to all other nodes
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
# complete rsync
self.assertEqual(2, daemon.stats['rsync'])
local_info = self._get_broker(
'a', node_index=0).get_info()
for i in range(1, 3):
remote_broker = self._get_broker('a', node_index=i)
self.assertTrue(os.path.exists(remote_broker.db_file))
remote_info = remote_broker.get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
def test_sync_remote_missing_most_rows(self):
put_timestamp = time.time()
# create "local" broker
broker = self._get_broker('a', node_index=0)
broker.initialize(put_timestamp)
# create "remote" broker
remote_broker = self._get_broker('a', node_index=1)
remote_broker.initialize(put_timestamp)
# add a row to "local" db
broker.put_container('/a/c', time.time(), 0, 0, 0,
POLICIES.default.idx)
        # replicate
daemon = replicator.AccountReplicator({})
def _rsync_file(db_file, remote_file, **kwargs):
remote_server, remote_path = remote_file.split('/', 1)
dest_path = os.path.join(self.root, remote_path)
shutil.copy(db_file, dest_path)
return True
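        # The stub above stands in for rsync: instead of shelling out, it
        # copies the db file into this test's root so replication can be
        # asserted locally (a test double, not production behaviour).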
daemon._rsync_file = _rsync_file
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# row merge
self.assertEqual(1, daemon.stats['remote_merge'])
local_info = self._get_broker(
'a', node_index=0).get_info()
remote_info = self._get_broker(
'a', node_index=1).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
def test_sync_remote_missing_one_rows(self):
put_timestamp = time.time()
# create "local" broker
broker = self._get_broker('a', node_index=0)
broker.initialize(put_timestamp)
# create "remote" broker
remote_broker = self._get_broker('a', node_index=1)
remote_broker.initialize(put_timestamp)
# add some rows to both db
for i in range(10):
put_timestamp = time.time()
for db in (broker, remote_broker):
path = '/a/c_%s' % i
db.put_container(path, put_timestamp, 0, 0, 0,
POLICIES.default.idx)
# now a row to the "local" broker only
broker.put_container('/a/c_missing', time.time(), 0, 0, 0,
POLICIES.default.idx)
# replicate
daemon = replicator.AccountReplicator({})
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# row merge
self.assertEqual(1, daemon.stats['diff'])
local_info = self._get_broker(
'a', node_index=0).get_info()
remote_info = self._get_broker(
'a', node_index=1).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
louietsai/python-for-android | python3-alpha/python3-src/Lib/test/test_multibytecodec.py | 55 | 11487 | #!/usr/bin/env python3
#
# test_multibytecodec.py
# Unit test for multibytecodec itself
#
from test import support
from test.support import TESTFN
import unittest, io, codecs, sys, os
import _multibytecodec
ALL_CJKENCODINGS = [
# _codecs_cn
'gb2312', 'gbk', 'gb18030', 'hz',
# _codecs_hk
'big5hkscs',
# _codecs_jp
'cp932', 'shift_jis', 'euc_jp', 'euc_jisx0213', 'shift_jisx0213',
'euc_jis_2004', 'shift_jis_2004',
# _codecs_kr
'cp949', 'euc_kr', 'johab',
# _codecs_tw
'big5', 'cp950',
# _codecs_iso2022
'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_2004',
'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr',
]
class Test_MultibyteCodec(unittest.TestCase):
def test_nullcoding(self):
for enc in ALL_CJKENCODINGS:
self.assertEqual(b''.decode(enc), '')
self.assertEqual(str(b'', enc), '')
self.assertEqual(''.encode(enc), b'')
def test_str_decode(self):
for enc in ALL_CJKENCODINGS:
self.assertEqual('abcd'.encode(enc), b'abcd')
def test_errorcallback_longindex(self):
dec = codecs.getdecoder('euc-kr')
myreplace = lambda exc: ('', sys.maxsize+1)
codecs.register_error('test.cjktest', myreplace)
self.assertRaises(IndexError, dec,
b'apple\x92ham\x93spam', 'test.cjktest')
def test_codingspec(self):
try:
for enc in ALL_CJKENCODINGS:
code = '# coding: {}\n'.format(enc)
exec(code)
finally:
support.unlink(TESTFN)
def test_init_segfault(self):
# bug #3305: this used to segfault
self.assertRaises(AttributeError,
_multibytecodec.MultibyteStreamReader, None)
self.assertRaises(AttributeError,
_multibytecodec.MultibyteStreamWriter, None)
def test_decode_unicode(self):
        # Trying to decode a unicode string should raise a TypeError
for enc in ALL_CJKENCODINGS:
self.assertRaises(TypeError, codecs.getdecoder(enc), "")
class Test_IncrementalEncoder(unittest.TestCase):
def test_stateless(self):
# cp949 encoder isn't stateful at all.
encoder = codecs.getincrementalencoder('cp949')()
self.assertEqual(encoder.encode('\ud30c\uc774\uc36c \ub9c8\uc744'),
b'\xc6\xc4\xc0\xcc\xbd\xe3 \xb8\xb6\xc0\xbb')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('\u2606\u223c\u2606', True),
b'\xa1\xd9\xa1\xad\xa1\xd9')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('', True), b'')
self.assertEqual(encoder.encode('', False), b'')
self.assertEqual(encoder.reset(), None)
def test_stateful(self):
# jisx0213 encoder is stateful for a few codepoints. eg)
# U+00E6 => A9DC
# U+00E6 U+0300 => ABC4
# U+0300 => ABDC
encoder = codecs.getincrementalencoder('jisx0213')()
self.assertEqual(encoder.encode('\u00e6\u0300'), b'\xab\xc4')
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertEqual(encoder.encode('\u0300'), b'\xab\xc4')
self.assertEqual(encoder.encode('\u00e6', True), b'\xa9\xdc')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('\u0300'), b'\xab\xdc')
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertEqual(encoder.encode('', True), b'\xa9\xdc')
self.assertEqual(encoder.encode('', True), b'')
def test_stateful_keep_buffer(self):
encoder = codecs.getincrementalencoder('jisx0213')()
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
self.assertEqual(encoder.encode('\u0300\u00e6'), b'\xab\xc4')
self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
self.assertEqual(encoder.reset(), None)
self.assertEqual(encoder.encode('\u0300'), b'\xab\xdc')
self.assertEqual(encoder.encode('\u00e6'), b'')
self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123')
self.assertEqual(encoder.encode('', True), b'\xa9\xdc')
def test_issue5640(self):
encoder = codecs.getincrementalencoder('shift-jis')('backslashreplace')
self.assertEqual(encoder.encode('\xff'), b'\\xff')
self.assertEqual(encoder.encode('\n'), b'\n')
class Test_IncrementalDecoder(unittest.TestCase):
def test_dbcs(self):
# cp949 decoder is simple with only 1 or 2 bytes sequences.
decoder = codecs.getincrementaldecoder('cp949')()
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0\xcc\xbd'),
'\ud30c\uc774')
self.assertEqual(decoder.decode(b'\xe3 \xb8\xb6\xc0\xbb'),
'\uc36c \ub9c8\uc744')
self.assertEqual(decoder.decode(b''), '')
def test_dbcs_keep_buffer(self):
decoder = codecs.getincrementaldecoder('cp949')()
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True)
self.assertEqual(decoder.decode(b'\xcc'), '\uc774')
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c')
self.assertRaises(UnicodeDecodeError, decoder.decode,
b'\xcc\xbd', True)
self.assertEqual(decoder.decode(b'\xcc'), '\uc774')
def test_iso2022(self):
decoder = codecs.getincrementaldecoder('iso2022-jp')()
ESC = b'\x1b'
self.assertEqual(decoder.decode(ESC + b'('), '')
self.assertEqual(decoder.decode(b'B', True), '')
self.assertEqual(decoder.decode(ESC + b'$'), '')
self.assertEqual(decoder.decode(b'B@$'), '\u4e16')
self.assertEqual(decoder.decode(b'@$@'), '\u4e16')
self.assertEqual(decoder.decode(b'$', True), '\u4e16')
self.assertEqual(decoder.reset(), None)
self.assertEqual(decoder.decode(b'@$'), '@$')
self.assertEqual(decoder.decode(ESC + b'$'), '')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True)
self.assertEqual(decoder.decode(b'B@$'), '\u4e16')
def test_decode_unicode(self):
        # Trying to decode a unicode string should raise a TypeError
for enc in ALL_CJKENCODINGS:
decoder = codecs.getincrementaldecoder(enc)()
self.assertRaises(TypeError, decoder.decode, "")
class Test_StreamReader(unittest.TestCase):
def test_bug1728403(self):
try:
f = open(TESTFN, 'wb')
try:
f.write(b'\xa1')
finally:
f.close()
f = codecs.open(TESTFN, encoding='cp949')
try:
self.assertRaises(UnicodeDecodeError, f.read, 2)
finally:
f.close()
finally:
support.unlink(TESTFN)
class Test_StreamWriter(unittest.TestCase):
if len('\U00012345') == 2: # UCS2
def test_gb18030(self):
s= io.BytesIO()
c = codecs.getwriter('gb18030')(s)
c.write('123')
self.assertEqual(s.getvalue(), b'123')
c.write('\U00012345')
self.assertEqual(s.getvalue(), b'123\x907\x959')
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(), b'123\x907\x959')
c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
self.assertEqual(s.getvalue(),
b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(),
b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
self.assertRaises(UnicodeError, c.reset)
self.assertEqual(s.getvalue(),
b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
def test_utf_8(self):
            s = io.BytesIO()
c = codecs.getwriter('utf-8')(s)
c.write('123')
self.assertEqual(s.getvalue(), b'123')
c.write('\U00012345')
self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85')
# Python utf-8 codec can't buffer surrogate pairs yet.
if 0:
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85')
c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac')
c.write('\U00012345'[0])
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac')
c.reset()
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac\xed\xa0\x88')
c.write('\U00012345'[1])
self.assertEqual(s.getvalue(),
b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
b'\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
else: # UCS4
pass
def test_streamwriter_strwrite(self):
s = io.BytesIO()
wr = codecs.getwriter('gb18030')(s)
wr.write('abcd')
self.assertEqual(s.getvalue(), b'abcd')
class Test_ISO2022(unittest.TestCase):
def test_g2(self):
iso2022jp2 = b'\x1b(B:hu4:unit\x1b.A\x1bNi de famille'
uni = ':hu4:unit\xe9 de famille'
self.assertEqual(iso2022jp2.decode('iso2022-jp-2'), uni)
def test_iso2022_jp_g0(self):
self.assertNotIn(b'\x0e', '\N{SOFT HYPHEN}'.encode('iso-2022-jp-2'))
for encoding in ('iso-2022-jp-2004', 'iso-2022-jp-3'):
e = '\u3406'.encode(encoding)
self.assertFalse(any(x > 0x80 for x in e))
def test_bug1572832(self):
if sys.maxunicode >= 0x10000:
myunichr = chr
else:
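            # Narrow (UCS2) build: compose the UTF-16 surrogate pair by hand.
            # Lead = 0xD800 + ((x - 0x10000) >> 10) = 0xD7C0 + (x >> 10);
            # trail = 0xDC00 + (x & 0x3FF).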
myunichr = lambda x: chr(0xD7C0+(x>>10)) + chr(0xDC00+(x&0x3FF))
for x in range(0x10000, 0x110000):
# Any ISO 2022 codec will cause the segfault
myunichr(x).encode('iso_2022_jp', 'ignore')
class TestStateful(unittest.TestCase):
text = '\u4E16\u4E16'
encoding = 'iso-2022-jp'
expected = b'\x1b$B@$@$'
expected_reset = b'\x1b$B@$@$\x1b(B'
def test_encode(self):
self.assertEqual(self.text.encode(self.encoding), self.expected_reset)
def test_incrementalencoder(self):
encoder = codecs.getincrementalencoder(self.encoding)()
output = b''.join(
encoder.encode(char)
for char in self.text)
self.assertEqual(output, self.expected)
def test_incrementalencoder_final(self):
encoder = codecs.getincrementalencoder(self.encoding)()
last_index = len(self.text) - 1
output = b''.join(
encoder.encode(char, index == last_index)
for index, char in enumerate(self.text))
self.assertEqual(output, self.expected_reset)
class TestHZStateful(TestStateful):
text = '\u804a\u804a'
encoding = 'hz'
expected = b'~{ADAD'
expected_reset = b'~{ADAD~}'
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
rajendrakrp/GeoMicroFormat | djangoappengine/management/commands/testserver.py | 13 | 2962 | from django.core.management.base import BaseCommand
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_stub_util
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--addrport', action='store', dest='addrport',
type='string', default='',
help='port number or ipaddr:port to run the server on'),
make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.'),
)
help = 'Runs a development server with data from the given fixture(s).'
args = '[fixture ...]'
requires_model_validation = False
def handle(self, *fixture_labels, **options):
from django.core.management import call_command
from django import db
from ...db.base import get_datastore_paths
from ...db.stubs import stub_manager
verbosity = int(options.get('verbosity'))
interactive = options.get('interactive')
addrport = options.get('addrport')
db_name = None
# switch default database to test
for name, settings in db.connections.databases.items():
if settings['ENGINE'] == 'djangoappengine.db':
db_name = name
for key, path in get_datastore_paths(settings).items():
settings[key] = "%s-testdb" % path
break
# reset connections list so its repopulated
db.connections._connections = {}
db.connection = db.connections[db.DEFAULT_DB_ALIAS]
# also reset stub manager
stub_manager.active_stubs = None
# run flush on that db
conn = db.connections[db_name]
conn.flush()
# Temporarily change consistency policy to force apply loaded data
datastore = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
orig_consistency_policy = datastore._consistency_policy
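        # probability=1 makes every write immediately visible, so the fixture
        # data is applied deterministically before the server starts.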
datastore.SetConsistencyPolicy(datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1))
# Import the fixture data into the test database.
call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})
datastore.SetConsistencyPolicy(orig_consistency_policy)
# Run the development server. Turn off auto-reloading because it causes
# a strange error -- it causes this handle() method to be called
# multiple times.
shutdown_message = '\nServer stopped.\nNote that the test database, %r, has not been deleted. You can explore it on your own.' % db_name
call_command('runserver', addrport=addrport, shutdown_message=shutdown_message, use_reloader=False, use_ipv6=options['use_ipv6'])
| bsd-3-clause |
MartinThoma/algorithms | Python/timing/password_hashing.py | 1 | 5049 | #!/usr/bin/env python
import random
import timeit
import numpy as np
from werkzeug.security import check_password_hash, generate_password_hash
random.seed(0)
def main():
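    # timeit runs these setup snippets outside the timed region (once per
    # repeat sample), so the imports and the test string are rebuilt each
    # time without counting toward the measured duration.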
str_gen = "import random;random.seed(0);string=''.join(random.choices('ABCDEFGHIJKLM', k=20));"
pw_gen = (
"from werkzeug.security import generate_password_hash, check_password_hash;"
)
string_20 = "".join(random.choices("ABCDEFGHIJKLM", k=20))
# string_2000 = ''.join(random.choices("ABCDEFGHIJKLM", k=2000))
# string_200000 = ''.join(random.choices("ABCDEFGHIJKLM", k=200000))
# string_200000000 = ''.join(random.choices("ABCDEFGHIJKLM", k=200000000))
functions = [
(
"generate_password_hash(string, method='pbkdf2:sha512:1',salt_length=8)",
"sha512, 1 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha512:15000',salt_length=8)",
"sha512, 15000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha256:15000',salt_length=8)",
"sha256, 15000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha512:1000',salt_length=8)",
"sha512, 1000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:sha256:1000',salt_length=8)",
"sha256, 1000 iteration",
str_gen + pw_gen,
),
(
"generate_password_hash(string, method='pbkdf2:md5:15000',salt_length=8)",
"md5, 15000 iteration",
str_gen + pw_gen,
),
]
iter_list(
functions,
title="Password generation time",
outfile="password-generation-time.png",
)
functions = [
(
"check_password_hash(pwhash=hash, password=string)",
"sha512, 1 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha512:1', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha512, 1000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha512:1000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha256, 1000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha256:1000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha512, 15000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha512:15000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"sha256, 15000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:sha256:15000', salt_length=8)",
),
(
"check_password_hash(pwhash=hash, password=string)",
"md5, 15000 iteration",
str_gen
+ pw_gen
+ "hash=generate_password_hash(\"{}\", method='pbkdf2:md5:15000', salt_length=8)",
),
]
iter_list(
functions,
title="Password verification time",
outfile="password-verification-time.png",
)
def iter_list(functions, title, outfile):
duration_list = {}
for func, name, setup in functions:
durations = timeit.repeat(func, repeat=100, number=3, setup=setup)
duration_list[name] = list(np.array(durations) * 1000)
print(
"{func:<20}: "
"min: {min:5.1f}μs, mean: {mean:5.1f}μs, max: {max:6.1f}μs".format(
func=name,
min=min(durations) * 10 ** 6,
mean=np.mean(durations) * 10 ** 6,
max=max(durations) * 10 ** 6,
)
)
create_boxplot(title, duration_list, outfile=outfile)
def create_boxplot(title, duration_list, showfliers=False, outfile="out.png"):
import operator
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(num=None, figsize=(8, 4), dpi=300, facecolor="w", edgecolor="k")
sns.set(style="whitegrid")
sorted_keys, sorted_vals = zip(
*sorted(duration_list.items(), key=operator.itemgetter(0))
)
flierprops = dict(markerfacecolor="0.75", markersize=1, linestyle="none")
ax = sns.boxplot(
data=sorted_vals,
width=0.3,
orient="h",
flierprops=flierprops,
showfliers=showfliers,
)
ax.set(xlabel="Time in ms", ylabel="")
plt.yticks(plt.yticks()[0], sorted_keys)
ax.set_title(title)
plt.tight_layout()
plt.savefig(outfile)
if __name__ == "__main__":
main()
| mit |
jnishi/chainer | tests/chainerx_tests/unit_tests/routines_tests/test_pooling.py | 2 | 5886 | import chainer
import numpy
import pytest
import chainerx
from chainerx_tests import array_utils
def _create_max_pool_args(
xp, device, x_shape, ksize, stride, pad, cover_all, float_dtype):
x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
ret_args = dict(x=x, ksize=ksize)
if stride is not None:
ret_args['stride'] = stride
if pad is not None:
ret_args['pad'] = pad
if cover_all is not None:
ret_args['cover_all'] = cover_all
return ret_args
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
((2, 3, 4), (1,), 1, 0),
    ((1, 3, 4), (2,), 3, 2),
((2, 3, 4, 4), (3, 3), 1, 0),
((2, 3, 4, 4), (3, 3), None, 0),
((1, 3, 4, 4), (3, 3), (1, 2), 1),
((1, 3, 4, 4), (3, 3), 2, (2, 0)),
((1, 3, 2, 6, 3), (1, 3, 2), 2, (2, 0, 1)),
((1, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((2, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((1, 3, 2, 6, 3, 2), (1, 3, 2, 2), 2, 2),
])
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_max_pool(device, x_shape, ksize, stride, pad, cover_all, float_dtype):
if device.backend.name == 'cuda' and len(ksize) != 2 and len(ksize) != 3:
# cuDNN supports only 2 and 3 spatial dimensions.
return chainerx.testing.ignore()
def create_args(xp):
return _create_max_pool_args(
xp, device, x_shape, ksize, stride, pad, cover_all, float_dtype)
def chainerx_max_pool():
y = chainerx.max_pool(**create_args(chainerx))
# In the case of CUDA, we get huge negative numbers instead of -inf
# around boundaries.
# Align them to chainer (native) results.
if device.backend.name == 'cuda':
y = chainerx.to_numpy(y)
y[y < -3.e+34] = -float('inf')
y = chainerx.array(y)
return y
chainerx.testing.assert_allclose(chainerx_max_pool(
), chainer.functions.max_pooling_nd(**create_args(numpy)).data)
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
((1, 3), (), 1, 0), # Requires at least one spatial dimension
((2, 3, 4, 3), (2, 2, 1), 3, 2), # Wrong number of ksize.
((2, 3, 4, 3), (2, 2), (1,), 0), # Wrong number of strides.
((1, 3, 4, 3), (2, 2), 3, (2,)), # Wrong number of paddings.
((4, 4, 2, 2), 5, 3, 0), # Output size should be positive.
])
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_max_pool_invalid(
device, x_shape, ksize, stride, pad, cover_all, float_dtype):
with pytest.raises(chainerx.DimensionError):
chainerx.max_pool(
**_create_max_pool_args(
chainerx, device, x_shape, ksize, stride, pad, cover_all,
float_dtype))
def _create_average_pool_args(
xp, device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
ret_args = dict(x=x, ksize=ksize)
if stride is not None:
ret_args['stride'] = stride
if pad is not None:
ret_args['pad'] = pad
if pad_mode is None:
# chainerx defaults to 'ignore', which is equivalent with
# pad_value=None in chainer
if xp is not chainerx:
ret_args['pad_value'] = None
else:
if xp is chainerx:
ret_args['pad_mode'] = pad_mode
else:
if pad_mode == 'zero':
ret_args['pad_value'] = 0
elif pad_mode == 'ignore':
ret_args['pad_value'] = None
else:
assert False # should never reach
return ret_args
# ignore warning occurring when pad_value is None in chainer
@pytest.mark.filterwarnings('ignore:invalid value encountered in true_divide')
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
((2, 3, 4), (1,), 1, 0),
    ((1, 3, 4), (2,), 3, 2),
((2, 3, 4, 4), (3, 3), 1, 0),
((2, 3, 4, 4), (3, 3), None, 0),
((1, 3, 4, 4), (3, 3), (1, 2), 1),
((1, 3, 4, 4), (3, 3), 2, (2, 0)),
((1, 3, 2, 6, 3), (1, 3, 2), 2, (2, 0, 1)),
((1, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((2, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((1, 3, 2, 6, 3, 2), (1, 3, 1, 1), 1, 1),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('pad_mode', ['zero', 'ignore', None])
def test_average_pool(
device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
if device.backend.name == 'cuda' and len(ksize) != 2 and len(ksize) != 3:
# cuDNN supports only 2 and 3 spatial dimensions.
return chainerx.testing.ignore()
def create_args(xp):
return _create_average_pool_args(
xp, device, x_shape, ksize, stride, pad, pad_mode, float_dtype)
chainerx.testing.assert_allclose(
chainerx.average_pool(**create_args(chainerx)),
chainer.functions.average_pooling_nd(**create_args(numpy)).data)
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
((1, 3), (), 1, 0), # Requires at least one spatial dimension
((2, 3, 4, 3), (2, 2, 1), 3, 2), # Wrong number of ksize.
((2, 3, 4, 3), (2, 2), (1,), 0), # Wrong number of strides.
((1, 3, 4, 3), (2, 2), 3, (2,)), # Wrong number of paddings.
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('pad_mode', ['zero', 'ignore', None])
def test_average_pool_invalid(
device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
with pytest.raises(chainerx.DimensionError):
chainerx.average_pool(
**_create_average_pool_args(
chainerx, device, x_shape, ksize, stride, pad, pad_mode,
float_dtype))
| mit |
caidongyun/pylearn2 | pylearn2/scripts/dbm/top_filters.py | 37 | 5478 | #!/usr/bin/env python
"""
Usage:
./top_filters <path_to_a_saved_DBM.pkl> <optional: output path prefix>
Displays the matrix product of the layer 1 and layer 2 weights.
Also displays a grid visualization the connections in more detail.
Row i of the grid corresponds to the second layer hidden unit
with the ith largest filter norm.
Grid cell (i,j) shows the filter for the first layer unit with the
jth largest weight going into the second layer unit for this row.
The cells is surrounded by a colored box.
Its brightness indicates the relative strength of the connection between
the first layer unit and second layer unit, and its color indicates
the sign of that connection (yellow = positive / excitatory,
magenta = negative / inhibitory).
Optionally saves these images as png files prefixed with
the given output path name instead of displaying them.
This can be useful when working over ssh.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import numpy as np
import sys
from theano.compat.six.moves import xrange
from pylearn2.config import yaml_parse
from pylearn2.gui.patch_viewer import PatchViewer
from pylearn2.gui.patch_viewer import make_viewer
from pylearn2.utils import serial
def sort_layer2(W2):
"""
    Sort the weights of a layer.
Parameters
----------
W2: list
The hidden layer to sort.
"""
print('Sorting so largest-norm layer 2 weights are plotted at the top')
norms = np.square(W2).sum(axis=0)
idxs = [elem[1] for elem in sorted(zip(-norms, range(norms.shape[0])))]
new = W2.copy()
for i in xrange(len(idxs)):
new[:, i] = W2[:, idxs[i]]
W2 = new
return new
def get_mat_product_viewer(W1, W2):
"""
Show the matrix product of 2 layers.
Parameters
----------
W1: list
First hidden layer.
W2: list
Second hidden layer.
"""
prod = np.dot(W1, W2)
pv = make_viewer(prod.T)
return pv
def get_connections_viewer(imgs, W1, W2):
"""
Show connections between 2 hidden layers.
Parameters
----------
imgs: ndarray
Images of weights from the first layer.
W1: list
First hidden layer.
W2: list
Second hidden layer.
"""
W2 = sort_layer2(W2)
N1 = W1.shape[1]
N = W2.shape[1]
N = min(N, 100)
count = get_elements_count(N, N1, W2)
pv = create_connect_viewer(N, N1, imgs, count, W2)
return pv
def create_connect_viewer(N, N1, imgs, count, W2):
"""
Create the patch to show connections between layers.
Parameters
----------
N: int
Number of rows.
N1: int
Number of elements in the first layer.
imgs: ndarray
Images of weights from the first layer.
count: int
Number of elements to show.
W2: list
Second hidden layer.
"""
pv = PatchViewer((N, count), imgs.shape[1:3], is_color=imgs.shape[3] == 3)
for i in xrange(N):
w = W2[:, i]
w /= np.abs(w).max()
wa = np.abs(w)
to_sort = zip(wa, range(N1), w)
s = sorted(to_sort)
for j in xrange(count):
idx = s[N1-j-1][1]
mag = s[N1-j-1][2]
if mag > 0:
act = (mag, 0)
else:
act = (0, -mag)
pv.add_patch(imgs[idx, ...], rescale=True, activation=act)
return pv
def get_elements_count(N, N1, W2):
"""
Retrieve the number of elements to show.
Parameters
----------
N: int
Number of rows.
N1: int
Number of elements in the first layer.
W2: list
Second hidden layer.
"""
thresh = .9
max_count = 0
total_counts = 0.
for i in xrange(N):
w = W2[:, i]
wa = np.abs(w)
total = wa.sum()
s = np.asarray(sorted(wa))
count = 1
while s[-count:].sum() < thresh * total:
count += 1
if count > max_count:
max_count = count
total_counts += count
ave = total_counts / float(N)
print('average needed filters', ave)
count = max_count
print('It takes', count, 'of', N1, 'elements to account for ',
          (thresh*100.), '% of the weight in at least one filter')
lim = 10
if count > lim:
count = lim
print('Only displaying ', count, ' elements though.')
if count > N1:
count = N1
return count
if __name__ == '__main__':
if len(sys.argv) == 2:
_, model_path = sys.argv
out_prefix = None
else:
_, model_path, out_prefix = sys.argv
model = serial.load(model_path)
layer_1, layer_2 = model.hidden_layers[0:2]
W1 = layer_1.get_weights()
W2 = layer_2.get_weights()
print(W1.shape)
print(W2.shape)
mat_v = get_mat_product_viewer(W1, W2)
if out_prefix is None:
mat_v.show()
else:
mat_v.save(out_prefix+"_prod.png")
dataset_yaml_src = model.dataset_yaml_src
dataset = yaml_parse.load(dataset_yaml_src)
imgs = dataset.get_weights_view(W1.T)
conn_v = get_connections_viewer(imgs, W1, W2)
if out_prefix is None:
conn_v.show()
else:
conn_v.save(out_prefix+".png")
| bsd-3-clause |
libcrosswind/libcrosswind | platform/windows/compilers/x64/TDM-GCC-64/gdb64/bin/lib/wsgiref/handlers.py | 137 | 15982 | """Base classes for server/gateway implementations"""
from types import StringType
from util import FileWrapper, guess_scheme, is_hop_by_hop
from headers import Headers
import sys, os, time
__all__ = ['BaseHandler', 'SimpleHandler', 'BaseCGIHandler', 'CGIHandler']
try:
dict
except NameError:
def dict(items):
d = {}
for k,v in items:
d[k] = v
return d
# Uncomment for 2.2 compatibility.
#try:
# True
# False
#except NameError:
# True = not None
# False = not True
# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None, # Dummy so we can use 1-based month numbers
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
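    # RFC 1123 date, e.g. "Sun, 06 Nov 1994 08:49:37 GMT"; HTTP dates are
    # always expressed in GMT.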
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[wd], day, _monthname[month], year, hh, mm, ss
)
class BaseHandler:
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = None # String name of server software, if any
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 Internal Server Error"
error_headers = [('Content-Type','text/plain')]
error_body = "A server error occurred. Please contact the administrator."
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will
want to redefine this method, such that it sets up callbacks
in the event loop to iterate over the data, and to call
'self.close()' once the response is finished.
"""
try:
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError,AttributeError,NotImplementedError):
pass
else:
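            # Only safe when the iterable yields exactly one block: by the
            # time headers are sent, bytes_sent already equals the full body
            # length.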
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert type(status) is StringType,"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert type(name) is StringType,"Header names must be strings"
assert type(val) is StringType,"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % format_date_time(time.time())
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert type(data) is StringType,"write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
# Only zero Content-Length if not set by the application (so
# that HEAD requests can be satisfied properly, see #3839)
self.headers.setdefault('Content-Length', "0")
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
"""Close the iterable (if needed) and reset all instance vars
Subclasses may want to also drop the client connection.
"""
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
"""WSGI mini-app to create error output
By default, this just uses the 'error_status', 'error_headers',
and 'error_body' attributes to generate an output page. It can
be overridden in a subclass to dynamically generate diagnostics,
choose an appropriate message for the user's preferred language, etc.
Note, however, that it's not recommended from a security perspective to
spit out diagnostics to any old user; ideally, you should have to do
something special to enable diagnostic output, which is why we don't
include any here!
"""
start_response(self.error_status,self.error_headers[:],sys.exc_info())
return [self.error_body]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
"""Override in subclass to buffer data for send to client
It's okay if this method actually transmits the data; BaseHandler
just separates write and flush operations for greater efficiency
when the underlying system actually has such a distinction.
"""
raise NotImplementedError
def _flush(self):
"""Override in subclass to force sending of recent '_write()' calls
It's okay if this method is a no-op (i.e., if '_write()' actually
        sends the data).
"""
raise NotImplementedError
def get_stdin(self):
"""Override in subclass to return suitable 'wsgi.input'"""
raise NotImplementedError
def get_stderr(self):
"""Override in subclass to return suitable 'wsgi.errors'"""
raise NotImplementedError
def add_cgi_vars(self):
"""Override in subclass to insert CGI variables in 'self.environ'"""
raise NotImplementedError
class SimpleHandler(BaseHandler):
"""Handler that's just initialized with streams, environment, etc.
This handler subclass is intended for synchronous HTTP/1.0 origin servers,
and handles sending the entire response output, given the correct inputs.
Usage::
handler = SimpleHandler(
inp,out,err,env, multithread=False, multiprocess=True
)
handler.run(app)"""
def __init__(self,stdin,stdout,stderr,environ,
multithread=True, multiprocess=False
):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
class BaseCGIHandler(SimpleHandler):
"""CGI-like systems using input/output/error streams and environ mapping
Usage::
handler = BaseCGIHandler(inp,out,err,env)
handler.run(app)
This handler class is useful for gateway protocols like ReadyExec and
FastCGI, that have usable input/output/error streams and an environment
mapping. It's also the base class for CGIHandler, which just uses
sys.stdin, os.environ, and so on.
The constructor also takes keyword arguments 'multithread' and
'multiprocess' (defaulting to 'True' and 'False' respectively) to control
the configuration sent to the application. It sets 'origin_server' to
False (to enable CGI-like output), and assumes that 'wsgi.run_once' is
False.
"""
origin_server = False
class CGIHandler(BaseCGIHandler):
"""CGI-based invocation via sys.stdin/stdout/stderr and os.environ
Usage::
CGIHandler().run(app)
The difference between this class and BaseCGIHandler is that it always
uses 'wsgi.run_once' of 'True', 'wsgi.multithread' of 'False', and
'wsgi.multiprocess' of 'True'. It does not take any initialization
parameters, but always uses 'sys.stdin', 'os.environ', and friends.
If you need to override any of these parameters, use BaseCGIHandler
instead.
"""
wsgi_run_once = True
# Do not allow os.environ to leak between requests in Google App Engine
# and other multi-run CGI use cases. This is not easily testable.
# See http://bugs.python.org/issue7250
os_environ = {}
def __init__(self):
BaseCGIHandler.__init__(
self, sys.stdin, sys.stdout, sys.stderr, dict(os.environ.items()),
multithread=False, multiprocess=True
)
| gpl-3.0 |
totcoindev/totcoin | qa/rpc-tests/txn_clone.py | 1 | 7986 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 12,500 SOS:
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)
# 3 hex manipulations on the clone are required
# manipulation 1. sequence is at version+#inputs+input+sigstub
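        # (offsets are doubled because clone_raw is a hex string: two
        # characters per byte)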
posseq = 2*(4+1+36+1)
seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
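        # splice the sequence back in little-endian byte order, as the raw
        # transaction serializes it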
clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]
# manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 SOS serialized is 00902f5009000000
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or
rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# manipulation 3. locktime is after outputs
poslt = pos0 + 2 * output_len
ltbe = '%08x' % rawtx1["locktime"]
clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500SOS for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 12190 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 1000 SOS for 2 matured,
# less possible orphaned matured subsidy
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 12190 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 12190
+ fund_foo_tx["fee"]
- 290
+ fund_bar_tx["fee"]
+ 1000)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| mit |
DgFutureLab/satoyama-api | app/conf.py | 1 | 1425 | from satoyama.database import manager
from app import flapp, limiter
### Settings necessary to use the app in the console
### Note that environment settings override module settings.
module_config = {
'LOGLEVEL': 'DEBUG'
}
### Settings shared between environments
shared_config = {
'CSRF_ENABLED': True,
'SECRET_KEY': 'you-will-never-guess'
}
APP_TEST_SETTINGS = {
'DEBUG' : True,
'PROPAGATE_EXCEPTIONS' : False,
'PORT' : 8081,
'LOGLEVEL' : 'DEBUG',
'ENVIRONMENT' : 'TEST',
}
APP_DEV_SETTINGS = {
'DEBUG' : True,
'PROPAGATE_EXCEPTIONS' : False,
'PORT' : 8080,
'LOGLEVEL' : 'DEBUG',
'ENVIRONMENT' : 'DEVELOPMENT',
}
APP_PROD_SETTINGS = {
'DEBUG' : True,
'PROPAGATE_EXCEPTIONS' : False,
'PORT' : 8081,
'LOGLEVEL' : 'WARNING',
'ENVIRONMENT' : 'PRODUCTION',
}
def configure_flapp(env):
flapp.config.update(shared_config)
if env == 'test':
flapp.config.update(APP_TEST_SETTINGS)
limiter.enabled = False
elif env == 'development':
flapp.config.update(APP_DEV_SETTINGS)
elif env == 'production':
flapp.config.update(APP_PROD_SETTINGS)
else:
assert False, 'Please specify a valid environment'
__configure_database(env)
def __configure_database(env):
manager.set_database_environment(env)
flapp.config.update({'SQLALCHEMY_DATABASE_URI' : manager.get_db_URI()})
setattr(flapp, 'engine', manager.engine)
setattr(flapp, 'db_session', manager.session)
| mit |
axhue/minecraftRL | execute.py | 1 | 1282 | from code.agent import Agent,AgentConfig
import gym
from code.preprocessors import AtariPreprocessor
from code.policies import LinearDecayGreedyEpsilonPolicy
from code.utils import get_output_folder
import os
from keras import backend as K
import tensorflow as tf
import numpy as np
if __name__ == '__main__':
atari_env = gym.make('Pitfall-v0')
doc = get_output_folder(None,"./logs","Pitfall-v0","DDQRN20")
#np.random.seed(0)
config = tf.ConfigProto()
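    # Grow GPU memory on demand instead of pre-allocating the whole device.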
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
K.tensorflow_backend.set_session(sess)
episodes = 100000
cfg = AgentConfig()
cfg.stateCnt = atari_env.observation_space.shape
cfg.actionCnt = atari_env.action_space.n
cfg.mem_size = 1000000
cfg.epsilon_policy = LinearDecayGreedyEpsilonPolicy(1.0,0.05,episodes+300000 )
cfg.gamma = 0.99
cfg.num_frames = 10
cfg.learning_rate = 0.0001
cfg.train_start = 50
cfg.train_freq = 4
cfg.target_update_freq = 10000
cfg.batch_size = 32
cfg.preprocessor = AtariPreprocessor((84,84,1))
cfg.log_path = doc
cfg.name = "R20"
agent = Agent(cfg,sess)
agent.train(atari_env,episodes,render=False)
#agent.fit(atari_env,episodes) | mit |
anistark/mozillians | vendor-local/lib/python/celery/tests/test_slow/test_buckets.py | 14 | 9830 | from __future__ import absolute_import
from __future__ import with_statement
import sys
import time
from functools import partial
from itertools import chain, izip
from celery.registry import TaskRegistry
from celery.task.base import Task
from celery.utils import timeutils
from celery.utils import uuid
from celery.worker import buckets
from celery.tests.utils import Case, skip_if_environ
skip_if_disabled = partial(skip_if_environ("SKIP_RLIMITS"))
class MockJob(object):
def __init__(self, task_id, task_name, args, kwargs):
self.task_id = task_id
self.task_name = task_name
self.args = args
self.kwargs = kwargs
def __eq__(self, other):
if isinstance(other, self.__class__):
return bool(self.task_id == other.task_id \
and self.task_name == other.task_name \
and self.args == other.args \
and self.kwargs == other.kwargs)
else:
return self == other
def __repr__(self):
return "<MockJob: task:%s id:%s args:%s kwargs:%s" % (
self.task_name, self.task_id, self.args, self.kwargs)
class test_TokenBucketQueue(Case):
@skip_if_disabled
    def test_empty_queue_yields_QueueEmpty(self):
x = buckets.TokenBucketQueue(fill_rate=10)
with self.assertRaises(buckets.Empty):
x.get()
@skip_if_disabled
def test_bucket__put_get(self):
x = buckets.TokenBucketQueue(fill_rate=10)
x.put("The quick brown fox")
self.assertEqual(x.get(), "The quick brown fox")
x.put_nowait("The lazy dog")
time.sleep(0.2)
self.assertEqual(x.get_nowait(), "The lazy dog")
@skip_if_disabled
def test_fill_rate(self):
x = buckets.TokenBucketQueue(fill_rate=10)
        # At a fill rate of 10 tokens/sec, 20 items should take roughly two
        # seconds; assert at least 1.5s to allow for the initial burst.
time_start = time.time()
[x.put(str(i)) for i in xrange(20)]
for i in xrange(20):
sys.stderr.write(".")
x.wait()
self.assertGreater(time.time() - time_start, 1.5)
@skip_if_disabled
def test_can_consume(self):
x = buckets.TokenBucketQueue(fill_rate=1)
x.put("The quick brown fox")
self.assertEqual(x.get(), "The quick brown fox")
time.sleep(0.1)
# Not yet ready for another token
x.put("The lazy dog")
with self.assertRaises(x.RateLimitExceeded):
x.get()
@skip_if_disabled
def test_expected_time(self):
x = buckets.TokenBucketQueue(fill_rate=1)
x.put_nowait("The quick brown fox")
self.assertEqual(x.get_nowait(), "The quick brown fox")
self.assertFalse(x.expected_time())
@skip_if_disabled
def test_qsize(self):
x = buckets.TokenBucketQueue(fill_rate=1)
x.put("The quick brown fox")
self.assertEqual(x.qsize(), 1)
self.assertEqual(x.get_nowait(), "The quick brown fox")
class test_rate_limit_string(Case):
@skip_if_disabled
def test_conversion(self):
self.assertEqual(timeutils.rate(999), 999)
self.assertEqual(timeutils.rate("1456/s"), 1456)
self.assertEqual(timeutils.rate("100/m"),
100 / 60.0)
self.assertEqual(timeutils.rate("10/h"),
10 / 60.0 / 60.0)
for zero in (0, None, "0", "0/m", "0/h", "0/s"):
self.assertEqual(timeutils.rate(zero), 0)
class TaskA(Task):
rate_limit = 10
class TaskB(Task):
rate_limit = None
class TaskC(Task):
rate_limit = "1/s"
class TaskD(Task):
rate_limit = "1000/m"
class test_TaskBucket(Case):
def setUp(self):
self.registry = TaskRegistry()
self.task_classes = (TaskA, TaskB, TaskC)
for task_cls in self.task_classes:
self.registry.register(task_cls)
@skip_if_disabled
def test_get_nowait(self):
x = buckets.TaskBucket(task_registry=self.registry)
with self.assertRaises(buckets.Empty):
x.get_nowait()
@skip_if_disabled
def test_refresh(self):
reg = {}
x = buckets.TaskBucket(task_registry=reg)
reg["foo"] = "something"
x.refresh()
self.assertIn("foo", x.buckets)
self.assertTrue(x.get_bucket_for_type("foo"))
@skip_if_disabled
def test__get_queue_for_type(self):
x = buckets.TaskBucket(task_registry={})
x.buckets["foo"] = buckets.TokenBucketQueue(fill_rate=1)
self.assertIs(x._get_queue_for_type("foo"), x.buckets["foo"].queue)
x.buckets["bar"] = buckets.FastQueue()
self.assertIs(x._get_queue_for_type("bar"), x.buckets["bar"])
@skip_if_disabled
def test_update_bucket_for_type(self):
bucket = buckets.TaskBucket(task_registry=self.registry)
b = bucket._get_queue_for_type(TaskC.name)
self.assertIs(bucket.update_bucket_for_type(TaskC.name).queue, b)
self.assertIs(bucket.buckets[TaskC.name].queue, b)
@skip_if_disabled
def test_auto_add_on_missing_put(self):
reg = {}
b = buckets.TaskBucket(task_registry=reg)
reg["nonexisting.task"] = "foo"
b.put(MockJob(uuid(), "nonexisting.task", (), {}))
self.assertIn("nonexisting.task", b.buckets)
@skip_if_disabled
def test_auto_add_on_missing(self):
b = buckets.TaskBucket(task_registry=self.registry)
for task_cls in self.task_classes:
self.assertIn(task_cls.name, b.buckets.keys())
self.registry.register(TaskD)
self.assertTrue(b.get_bucket_for_type(TaskD.name))
self.assertIn(TaskD.name, b.buckets.keys())
self.registry.unregister(TaskD)
@skip_if_disabled
def test_has_rate_limits(self):
b = buckets.TaskBucket(task_registry=self.registry)
self.assertEqual(b.buckets[TaskA.name]._bucket.fill_rate, 10)
self.assertIsInstance(b.buckets[TaskB.name], buckets.Queue)
self.assertEqual(b.buckets[TaskC.name]._bucket.fill_rate, 1)
self.registry.register(TaskD)
b.init_with_registry()
try:
self.assertEqual(b.buckets[TaskD.name]._bucket.fill_rate,
1000 / 60.0)
finally:
self.registry.unregister(TaskD)
@skip_if_disabled
def test_on_empty_buckets__get_raises_empty(self):
b = buckets.TaskBucket(task_registry=self.registry)
with self.assertRaises(buckets.Empty):
b.get(block=False)
self.assertEqual(b.qsize(), 0)
@skip_if_disabled
def test_put__get(self):
b = buckets.TaskBucket(task_registry=self.registry)
job = MockJob(uuid(), TaskA.name, ["theqbf"], {"foo": "bar"})
b.put(job)
self.assertEqual(b.get(), job)
@skip_if_disabled
def test_fill_rate(self):
b = buckets.TaskBucket(task_registry=self.registry)
cjob = lambda i: MockJob(uuid(), TaskA.name, [i], {})
jobs = [cjob(i) for i in xrange(20)]
[b.put(job) for job in jobs]
self.assertEqual(b.qsize(), 20)
        # At a fill rate of 10 tokens/sec, 20 items should take roughly two
        # seconds; assert at least 1.5s to allow for the initial burst.
time_start = time.time()
for i, job in enumerate(jobs):
sys.stderr.write(".")
self.assertEqual(b.get(), job)
self.assertGreater(time.time() - time_start, 1.5)
@skip_if_disabled
def test__very_busy_queue_doesnt_block_others(self):
b = buckets.TaskBucket(task_registry=self.registry)
cjob = lambda i, t: MockJob(uuid(), t.name, [i], {})
ajobs = [cjob(i, TaskA) for i in xrange(10)]
bjobs = [cjob(i, TaskB) for i in xrange(20)]
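        # Interleave unlimited TaskB jobs with rate-limited TaskA jobs; the
        # bucket should keep serving TaskB while TaskA is throttled.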
jobs = list(chain(*izip(bjobs, ajobs)))
for job in jobs:
b.put(job)
got_ajobs = 0
for job in (b.get() for i in xrange(20)):
if job.task_name == TaskA.name:
got_ajobs += 1
self.assertGreater(got_ajobs, 2)
@skip_if_disabled
def test_thorough__multiple_types(self):
self.registry.register(TaskD)
try:
b = buckets.TaskBucket(task_registry=self.registry)
cjob = lambda i, t: MockJob(uuid(), t.name, [i], {})
ajobs = [cjob(i, TaskA) for i in xrange(10)]
bjobs = [cjob(i, TaskB) for i in xrange(10)]
cjobs = [cjob(i, TaskC) for i in xrange(10)]
djobs = [cjob(i, TaskD) for i in xrange(10)]
# Spread the jobs around.
jobs = list(chain(*izip(ajobs, bjobs, cjobs, djobs)))
[b.put(job) for job in jobs]
for i, job in enumerate(jobs):
sys.stderr.write(".")
self.assertTrue(b.get(), job)
self.assertEqual(i + 1, len(jobs))
finally:
self.registry.unregister(TaskD)
@skip_if_disabled
def test_empty(self):
x = buckets.TaskBucket(task_registry=self.registry)
self.assertTrue(x.empty())
x.put(MockJob(uuid(), TaskC.name, [], {}))
self.assertFalse(x.empty())
x.clear()
self.assertTrue(x.empty())
@skip_if_disabled
def test_items(self):
x = buckets.TaskBucket(task_registry=self.registry)
x.buckets[TaskA.name].put(1)
x.buckets[TaskB.name].put(2)
x.buckets[TaskC.name].put(3)
self.assertEqual(sorted(x.items), [1, 2, 3])
class test_FastQueue(Case):
def test_items(self):
x = buckets.FastQueue()
x.put(10)
x.put(20)
self.assertListEqual([10, 20], list(x.items))
def test_wait(self):
x = buckets.FastQueue()
x.put(10)
self.assertEqual(x.wait(), 10)
def test_clear(self):
x = buckets.FastQueue()
x.put(10)
x.put(20)
self.assertFalse(x.empty())
x.clear()
self.assertTrue(x.empty())
| bsd-3-clause |
takeshineshiro/nova | nova/api/openstack/compute/scheduler_hints.py | 3 | 1768 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.schemas import scheduler_hints as schema
from nova.api.openstack import extensions
ALIAS = "os-scheduler-hints"
class SchedulerHints(extensions.V3APIExtensionBase):
"""Pass arbitrary key/value pairs to the scheduler."""
name = "SchedulerHints"
alias = ALIAS
version = 1
def get_controller_extensions(self):
return []
def get_resources(self):
return []
# NOTE(gmann): Accepting request body in this function to fetch "scheduler
# hint". This is a workaround to allow OS_SCH-HNT at the top level
# of the body request, but that it will be changed in the future to be a
# subset of the servers dict.
def server_create(self, server_dict, create_kwargs, req_body):
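        # e.g. req_body == {'server': {...},
        #                   'os:scheduler_hints': {'group': '<uuid>'}}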
scheduler_hints = {}
if 'os:scheduler_hints' in req_body:
scheduler_hints = req_body['os:scheduler_hints']
elif 'OS-SCH-HNT:scheduler_hints' in req_body:
scheduler_hints = req_body['OS-SCH-HNT:scheduler_hints']
create_kwargs['scheduler_hints'] = scheduler_hints
def get_server_create_schema(self):
return schema.server_create
| apache-2.0 |
openstack-infra/shade | shade/tests/functional/test_router.py | 1 | 13914 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_router
----------------------------------
Functional tests for `shade` router methods.
"""
import ipaddress
from shade.exc import OpenStackCloudException
from shade.tests.functional import base
EXPECTED_TOPLEVEL_FIELDS = (
'id', 'name', 'admin_state_up', 'external_gateway_info',
'tenant_id', 'routes', 'status'
)
EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips')
class TestRouter(base.BaseFunctionalTestCase):
def setUp(self):
super(TestRouter, self).setUp()
if not self.operator_cloud.has_service('network'):
self.skipTest('Network service not supported by cloud')
self.router_prefix = self.getUniqueString('router')
self.network_prefix = self.getUniqueString('network')
self.subnet_prefix = self.getUniqueString('subnet')
# NOTE(Shrews): Order matters!
self.addCleanup(self._cleanup_networks)
self.addCleanup(self._cleanup_subnets)
self.addCleanup(self._cleanup_routers)
def _cleanup_routers(self):
exception_list = list()
for router in self.operator_cloud.list_routers():
if router['name'].startswith(self.router_prefix):
try:
self.operator_cloud.delete_router(router['name'])
except Exception as e:
exception_list.append(str(e))
continue
if exception_list:
raise OpenStackCloudException('\n'.join(exception_list))
def _cleanup_networks(self):
exception_list = list()
for network in self.operator_cloud.list_networks():
if network['name'].startswith(self.network_prefix):
try:
self.operator_cloud.delete_network(network['name'])
except Exception as e:
exception_list.append(str(e))
continue
if exception_list:
raise OpenStackCloudException('\n'.join(exception_list))
def _cleanup_subnets(self):
exception_list = list()
for subnet in self.operator_cloud.list_subnets():
if subnet['name'].startswith(self.subnet_prefix):
try:
self.operator_cloud.delete_subnet(subnet['id'])
except Exception as e:
exception_list.append(str(e))
continue
if exception_list:
raise OpenStackCloudException('\n'.join(exception_list))
def test_create_router_basic(self):
net1_name = self.network_prefix + '_net1'
net1 = self.operator_cloud.create_network(
name=net1_name, external=True)
router_name = self.router_prefix + '_create_basic'
router = self.operator_cloud.create_router(
name=router_name,
admin_state_up=True,
ext_gateway_net_id=net1['id'],
)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, router)
ext_gw_info = router['external_gateway_info']
for field in EXPECTED_GW_INFO_FIELDS:
self.assertIn(field, ext_gw_info)
self.assertEqual(router_name, router['name'])
self.assertEqual('ACTIVE', router['status'])
self.assertEqual(net1['id'], ext_gw_info['network_id'])
self.assertTrue(ext_gw_info['enable_snat'])
def test_create_router_project(self):
project = self.operator_cloud.get_project('demo')
self.assertIsNotNone(project)
proj_id = project['id']
net1_name = self.network_prefix + '_net1'
net1 = self.operator_cloud.create_network(
name=net1_name, external=True, project_id=proj_id)
router_name = self.router_prefix + '_create_project'
router = self.operator_cloud.create_router(
name=router_name,
admin_state_up=True,
ext_gateway_net_id=net1['id'],
project_id=proj_id
)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, router)
ext_gw_info = router['external_gateway_info']
for field in EXPECTED_GW_INFO_FIELDS:
self.assertIn(field, ext_gw_info)
self.assertEqual(router_name, router['name'])
self.assertEqual('ACTIVE', router['status'])
self.assertEqual(proj_id, router['tenant_id'])
self.assertEqual(net1['id'], ext_gw_info['network_id'])
self.assertTrue(ext_gw_info['enable_snat'])
def _create_and_verify_advanced_router(self,
external_cidr,
external_gateway_ip=None):
# external_cidr must be passed in as unicode (u'')
# NOTE(Shrews): The arguments are needed because these tests
# will run in parallel and we want to make sure that each test
# is using different resources to prevent race conditions.
net1_name = self.network_prefix + '_net1'
sub1_name = self.subnet_prefix + '_sub1'
net1 = self.operator_cloud.create_network(
name=net1_name, external=True)
sub1 = self.operator_cloud.create_subnet(
net1['id'], external_cidr, subnet_name=sub1_name,
gateway_ip=external_gateway_ip
)
ip_net = ipaddress.IPv4Network(external_cidr)
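        # Reserve the last usable host address in the subnet as the router's
        # external fixed IP.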
last_ip = str(list(ip_net.hosts())[-1])
router_name = self.router_prefix + '_create_advanced'
router = self.operator_cloud.create_router(
name=router_name,
admin_state_up=False,
ext_gateway_net_id=net1['id'],
enable_snat=False,
ext_fixed_ips=[
{'subnet_id': sub1['id'], 'ip_address': last_ip}
]
)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, router)
ext_gw_info = router['external_gateway_info']
for field in EXPECTED_GW_INFO_FIELDS:
self.assertIn(field, ext_gw_info)
self.assertEqual(router_name, router['name'])
self.assertEqual('ACTIVE', router['status'])
self.assertFalse(router['admin_state_up'])
self.assertEqual(1, len(ext_gw_info['external_fixed_ips']))
self.assertEqual(
sub1['id'],
ext_gw_info['external_fixed_ips'][0]['subnet_id']
)
self.assertEqual(
last_ip,
ext_gw_info['external_fixed_ips'][0]['ip_address']
)
return router
def test_create_router_advanced(self):
self._create_and_verify_advanced_router(external_cidr=u'10.2.2.0/24')
def test_add_remove_router_interface(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.3.3.0/24')
net_name = self.network_prefix + '_intnet1'
sub_name = self.subnet_prefix + '_intsub1'
net = self.operator_cloud.create_network(name=net_name)
sub = self.operator_cloud.create_subnet(
net['id'], '10.4.4.0/24', subnet_name=sub_name,
gateway_ip='10.4.4.1'
)
iface = self.operator_cloud.add_router_interface(
router, subnet_id=sub['id'])
self.assertIsNone(
self.operator_cloud.remove_router_interface(
router, subnet_id=sub['id'])
)
# Test return values *after* the interface is detached so the
# resources we've created can be cleaned up if these asserts fail.
self.assertIsNotNone(iface)
for key in ('id', 'subnet_id', 'port_id', 'tenant_id'):
self.assertIn(key, iface)
self.assertEqual(router['id'], iface['id'])
self.assertEqual(sub['id'], iface['subnet_id'])
def test_list_router_interfaces(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.5.5.0/24')
net_name = self.network_prefix + '_intnet1'
sub_name = self.subnet_prefix + '_intsub1'
net = self.operator_cloud.create_network(name=net_name)
sub = self.operator_cloud.create_subnet(
net['id'], '10.6.6.0/24', subnet_name=sub_name,
gateway_ip='10.6.6.1'
)
iface = self.operator_cloud.add_router_interface(
router, subnet_id=sub['id'])
all_ifaces = self.operator_cloud.list_router_interfaces(router)
int_ifaces = self.operator_cloud.list_router_interfaces(
router, interface_type='internal')
ext_ifaces = self.operator_cloud.list_router_interfaces(
router, interface_type='external')
self.assertIsNone(
self.operator_cloud.remove_router_interface(
router, subnet_id=sub['id'])
)
# Test return values *after* the interface is detached so the
# resources we've created can be cleaned up if these asserts fail.
self.assertIsNotNone(iface)
self.assertEqual(2, len(all_ifaces))
self.assertEqual(1, len(int_ifaces))
self.assertEqual(1, len(ext_ifaces))
ext_fixed_ips = router['external_gateway_info']['external_fixed_ips']
self.assertEqual(ext_fixed_ips[0]['subnet_id'],
ext_ifaces[0]['fixed_ips'][0]['subnet_id'])
self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id'])
def test_update_router_name(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.7.7.0/24')
new_name = self.router_prefix + '_update_name'
updated = self.operator_cloud.update_router(
router['id'], name=new_name)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, updated)
# Name is the only change we expect
self.assertEqual(new_name, updated['name'])
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
self.assertEqual(router['external_gateway_info'],
updated['external_gateway_info'])
def test_update_router_routes(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.7.7.0/24')
routes = [{
"destination": "10.7.7.0/24",
"nexthop": "10.7.7.99"
}]
updated = self.operator_cloud.update_router(
router['id'], routes=routes)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, updated)
        # Routes are the only change we expect
self.assertEqual(routes, updated['routes'])
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
self.assertEqual(router['external_gateway_info'],
updated['external_gateway_info'])
def test_update_router_admin_state(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.8.8.0/24')
updated = self.operator_cloud.update_router(
router['id'], admin_state_up=True)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, updated)
# admin_state_up is the only change we expect
self.assertTrue(updated['admin_state_up'])
self.assertNotEqual(router['admin_state_up'],
updated['admin_state_up'])
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['name'], updated['name'])
self.assertEqual(router['external_gateway_info'],
updated['external_gateway_info'])
def test_update_router_ext_gw_info(self):
router = self._create_and_verify_advanced_router(
external_cidr=u'10.9.9.0/24')
# create a new subnet
existing_net_id = router['external_gateway_info']['network_id']
sub_name = self.subnet_prefix + '_update'
sub = self.operator_cloud.create_subnet(
existing_net_id, '10.10.10.0/24', subnet_name=sub_name,
gateway_ip='10.10.10.1'
)
updated = self.operator_cloud.update_router(
router['id'],
ext_gateway_net_id=existing_net_id,
ext_fixed_ips=[
{'subnet_id': sub['id'], 'ip_address': '10.10.10.77'}
]
)
self.assertIsNotNone(updated)
for field in EXPECTED_TOPLEVEL_FIELDS:
self.assertIn(field, updated)
# external_gateway_info is the only change we expect
ext_gw_info = updated['external_gateway_info']
self.assertEqual(1, len(ext_gw_info['external_fixed_ips']))
self.assertEqual(
sub['id'],
ext_gw_info['external_fixed_ips'][0]['subnet_id']
)
self.assertEqual(
'10.10.10.77',
ext_gw_info['external_fixed_ips'][0]['ip_address']
)
# Validate nothing else changed
self.assertEqual(router['status'], updated['status'])
self.assertEqual(router['name'], updated['name'])
self.assertEqual(router['admin_state_up'], updated['admin_state_up'])
| apache-2.0 |
mitya57/django | tests/m2m_and_m2o/tests.py | 131 | 2700 | from django.db.models import Q
from django.test import TestCase
from .models import Issue, StringReferenceModel, User
class RelatedObjectTests(TestCase):
def test_related_objects_have_name_attribute(self):
for field_name in ('test_issue_client', 'test_issue_cc'):
obj = User._meta.get_field(field_name)
self.assertEqual(field_name, obj.field.related_query_name())
def test_m2m_and_m2o(self):
r = User.objects.create(username="russell")
g = User.objects.create(username="gustav")
i1 = Issue(num=1)
i1.client = r
i1.save()
i2 = Issue(num=2)
i2.client = r
i2.save()
i2.cc.add(r)
i3 = Issue(num=3)
i3.client = g
i3.save()
i3.cc.add(r)
self.assertQuerysetEqual(
Issue.objects.filter(client=r.id), [
1,
2,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(client=g.id), [
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=g.id), []
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=r.id), [
2,
3,
],
lambda i: i.num
)
# These queries combine results from the m2m and the m2o relationships.
# They're three ways of saying the same thing.
self.assertQuerysetEqual(
Issue.objects.filter(Q(cc__id__exact=r.id) | Q(client=r.id)), [
1,
2,
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=r.id) | Issue.objects.filter(client=r.id), [
1,
2,
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(Q(client=r.id) | Q(cc__id__exact=r.id)), [
1,
2,
3,
],
lambda i: i.num
)
class RelatedObjectUnicodeTests(TestCase):
def test_m2m_with_unicode_reference(self):
"""
Regression test for #6045: references to other models can be
strings, providing they are directly convertible to ASCII.
"""
m1 = StringReferenceModel.objects.create()
m2 = StringReferenceModel.objects.create()
m2.others.add(m1) # used to cause an error (see ticket #6045)
m2.save()
list(m2.others.all()) # Force retrieval.
| bsd-3-clause |
hkawasaki/kawasaki-aio8-1 | lms/djangoapps/courseware/features/video.py | 8 | 20997 | # -*- coding: utf-8 -*-
# pylint: disable=C0111
from lettuce import world, step, before, after
import json
import os
import time
import requests
from nose.tools import assert_less, assert_equal, assert_true, assert_false
from common import i_am_registered_for_the_course, visit_scenario_item
from django.utils.translation import ugettext as _
from django.conf import settings
from cache_toolbox.core import del_cached_content
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
LANGUAGES = settings.ALL_LANGUAGES
VIDEO_SOURCE_PORT = settings.VIDEO_SOURCE_PORT
############### ACTIONS ####################
HTML5_SOURCES = [
'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]
FLASH_SOURCES = {
'youtube_id_1_0': 'OEoXaMPEzfM',
'youtube_id_0_75': 'JMD_ifUUfsU',
'youtube_id_1_25': 'AKqURZnYqpk',
'youtube_id_1_5': 'DYpADpL7jAY',
}
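# NOTE: the per-speed YouTube IDs above point at separate encodes of the same
# clip, one for each playback speed the legacy flash-mode player supports
# (0.75x through 1.5x).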
HTML5_SOURCES_INCORRECT = [
'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
VIDEO_BUTTONS = {
'CC': '.hide-subtitles',
'volume': '.volume',
'play': '.video_control.play',
'pause': '.video_control.pause',
'fullscreen': '.add-fullscreen',
'download_transcript': '.video-tracks > a',
'quality': '.quality-control',
}
VIDEO_MENUS = {
'language': '.lang .menu',
'speed': '.speed .menu',
'download_transcript': '.video-tracks .a11y-menu-list',
}
coursenum = 'test_course'
@before.each_scenario
def setUp(scenario):
world.video_sequences = {}
@after.each_scenario
def tearDown(scenario):
world.browser.cookies.delete('edX_video_player_mode')
class RequestHandlerWithSessionId(object):
def get(self, url):
"""
Sends a request.
"""
kwargs = dict()
session_id = [{i['name']:i['value']} for i in world.browser.cookies.all() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
self.response = response
self.status_code = response.status_code
self.headers = response.headers
self.content = response.content
return self
def is_success(self):
"""
        Returns `True` if the request succeeded, otherwise returns `False`.
"""
if self.status_code < 400:
return True
return False
def check_header(self, name, value):
"""
Returns `True` if the response header exist and has appropriate value,
otherwise, returns `False`.
"""
if value in self.headers.get(name, ''):
return True
return False
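# Illustrative usage sketch (mirrors the transcript-download steps further
# below): the handler forwards the browser's Django session cookie so the URL
# is fetched as the logged-in user.
#
#     handler = RequestHandlerWithSessionId()
#     if handler.get(url).is_success():
#         assert handler.check_header('content-type', 'text/plain')
#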
def get_metadata(parent_location, player_mode, data, display_name='Video'):
kwargs = {
'parent_location': parent_location,
'category': 'video',
'display_name': display_name,
'metadata': {},
}
if player_mode == 'html5':
kwargs['metadata'].update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
kwargs['metadata'].update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
kwargs['metadata'].update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
kwargs['metadata'].update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'flash':
kwargs['metadata'].update(FLASH_SOURCES)
world.browser.cookies.add({'edX_video_player_mode': 'flash'})
if data:
conversions = {
'transcripts': json.loads,
'download_track': json.loads,
'download_video': json.loads,
}
for key in data:
if key in conversions:
data[key] = conversions[key](data[key])
kwargs['metadata'].update(data)
return kwargs
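# For example (illustrative), get_metadata(loc, 'html5', None) returns kwargs
# whose metadata blanks out all the YouTube IDs and sets html5_sources to
# HTML5_SOURCES, forcing the player into pure HTML5 mode.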
def add_videos_to_course(course, player_mode=None, display_names=None, hashes=None):
parent_location = add_vertical_to_course(course)
kwargs = {
'course': course,
'parent_location': parent_location,
'player_mode': player_mode,
'display_name': display_names[0],
}
if hashes:
for index, item_data in enumerate(hashes):
kwargs.update({
'display_name': display_names[index],
'data': item_data,
})
add_video_to_course(**kwargs)
else:
add_video_to_course(**kwargs)
def add_video_to_course(course, parent_location=None, player_mode=None, data=None, display_name='Video'):
if not parent_location:
parent_location = add_vertical_to_course(course)
kwargs = get_metadata(parent_location, player_mode, data, display_name=display_name)
world.scenario_dict['VIDEO'] = world.ItemFactory.create(**kwargs)
def add_vertical_to_course(course_num):
world.scenario_dict['LAST_VERTICAL'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category='vertical',
display_name='Test Vertical-{}'.format(len(set(world.video_sequences.values()))),
)
return last_vertical_location(course_num)
def last_vertical_location(course_num):
return world.scenario_dict['LAST_VERTICAL'].location._replace(course=course_num)
def upload_file(filename, location):
    path = os.path.join(TEST_ROOT, 'uploads/', filename)
    mime_type = "application/json"
    content_location = StaticContent.compute_location(
        location.org, location.course, filename
    )
    # Use a context manager so the file handle is closed after reading.
    with open(os.path.abspath(path)) as f:
        content = StaticContent(content_location, filename, mime_type, f.read())
    contentstore().save(content)
    del_cached_content(content.location)
def navigate_to_an_item_in_a_sequence(number):
sequence_css = '#sequence-list a[data-element="{0}"]'.format(number)
world.css_click(sequence_css)
def change_video_speed(speed):
world.browser.execute_script("$('.speeds').addClass('is-opened')")
speed_css = 'li[data-speed="{0}"] a'.format(speed)
world.wait_for_visible('.speeds')
world.css_click(speed_css)
def open_menu(menu):
world.browser.execute_script("$('{selector}').parent().addClass('is-opened')".format(
selector=VIDEO_MENUS[menu]
))
def get_all_dimensions():
video = get_dimensions('.video-player iframe, .video-player video')
wrapper = get_dimensions('.tc-wrapper')
controls = get_dimensions('.video-controls')
progress_slider = get_dimensions('.video-controls > .slider')
expected = dict(wrapper)
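    # The video should fill the wrapper, less the controls bar and half the
    # height of the progress slider.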
expected['height'] -= controls['height'] + 0.5 * progress_slider['height']
return (video, expected)
def get_dimensions(selector):
element = world.css_find(selector).first
return element._element.size
def get_window_dimensions():
return world.browser.driver.get_window_size()
def set_window_dimensions(width, height):
world.browser.driver.set_window_size(width, height)
# Wait 200 ms when JS finish resizing
world.wait(0.2)
def duration():
"""
Total duration of the video, in seconds.
"""
elapsed_time, duration = video_time()
return duration
def elapsed_time():
"""
Elapsed time of the video, in seconds.
"""
elapsed_time, duration = video_time()
return elapsed_time
def video_time():
"""
Return a tuple `(elapsed_time, duration)`, each in seconds.
"""
# The full time has the form "0:32 / 3:14"
full_time = world.css_text('div.vidtime')
# Split the time at the " / ", to get ["0:32", "3:14"]
elapsed_str, duration_str = full_time.split(' / ')
# Convert each string to seconds
return (parse_time_str(elapsed_str), parse_time_str(duration_str))
def parse_time_str(time_str):
"""
Parse a string of the form 1:23 into seconds (int).
"""
time_obj = time.strptime(time_str, '%M:%S')
return time_obj.tm_min * 60 + time_obj.tm_sec
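# For example, parse_time_str('1:23') returns 83 and parse_time_str('0:32')
# returns 32; these are the "M:SS" strings shown in the player's time display.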
def find_caption_line_by_data_index(index):
SELECTOR = ".subtitles > li[data-index='{index}']".format(index=index)
return world.css_find(SELECTOR).first
def wait_for_video():
world.wait_for_present('.is-initialized')
world.wait_for_present('div.vidtime')
world.wait_for_invisible('.video-wrapper .spinner')
world.wait_for_ajax_complete()
@step("I reload the page with video$")
def reload_the_page_with_video(_step):
_step.given('I reload the page')
wait_for_video()
@step('youtube stub server (.*) YouTube API')
def configure_youtube_api(_step, action):
    action = action.strip()
if action == 'proxies':
world.youtube.config['youtube_api_blocked'] = False
elif action == 'blocks':
world.youtube.config['youtube_api_blocked'] = True
else:
raise ValueError('Parameter `action` should be one of "proxies" or "blocks".')
@step('when I view the (.*) it does not have autoplay enabled$')
def does_not_autoplay(_step, video_type):
actual = world.css_find('.%s' % video_type)[0]['data-autoplay']
expected = [u'False', u'false', False]
assert actual in expected
@step('the course has a Video component in "([^"]*)" mode(?:\:)?$')
def view_video(_step, player_mode):
i_am_registered_for_the_course(_step, coursenum)
data = _step.hashes[0] if _step.hashes else None
add_video_to_course(coursenum, player_mode=player_mode.lower(), data=data)
visit_scenario_item('SECTION')
wait_for_video()
@step('a video in "([^"]*)" mode(?:\:)?$')
def add_video(_step, player_mode):
data = _step.hashes[0] if _step.hashes else None
add_video_to_course(coursenum, player_mode=player_mode.lower(), data=data)
visit_scenario_item('SECTION')
wait_for_video()
@step('video(?:s)? "([^"]*)" in "([^"]*)" mode in position "([^"]*)" of sequential(?:\:)?$')
def add_video_in_position(_step, video_ids, player_mode, position):
sequences = {video_id.strip(): position for video_id in video_ids.split(',')}
add_videos_to_course(coursenum, player_mode=player_mode.lower(), display_names=sequences.keys(), hashes=_step.hashes)
world.video_sequences.update(sequences)
@step('I open the section with videos$')
def visit_video_section(_step):
visit_scenario_item('SECTION')
wait_for_video()
@step('I select the "([^"]*)" speed$')
def i_select_video_speed(_step, speed):
change_video_speed(speed)
@step('I select the "([^"]*)" speed on video "([^"]*)"$')
def change_video_speed_on_video(_step, speed, player_id):
navigate_to_an_item_in_a_sequence(world.video_sequences[player_id])
change_video_speed(speed)
@step('I open video "([^"]*)"$')
def open_video(_step, player_id):
navigate_to_an_item_in_a_sequence(world.video_sequences[player_id])
@step('video "([^"]*)" should start playing at speed "([^"]*)"$')
def check_video_speed(_step, player_id, speed):
speed_css = '.speeds .value'
assert world.css_has_text(speed_css, '{0}x'.format(speed))
@step('youtube server is up and response time is (.*) seconds$')
def set_youtube_response_timeout(_step, time):
world.youtube.config['time_to_response'] = float(time)
@step('the video has rendered in "([^"]*)" mode$')
def video_is_rendered(_step, mode):
modes = {
'html5': 'video',
'youtube': 'iframe',
'flash': 'iframe',
}
html_tag = modes[mode.lower()]
assert world.css_find('.video {0}'.format(html_tag)).first
@step('videos have rendered in "([^"]*)" mode$')
def videos_are_rendered(_step, mode):
modes = {
'html5': 'video',
'youtube': 'iframe',
'flash': 'iframe',
}
html_tag = modes[mode.lower()]
actual = len(world.css_find('.video {0}'.format(html_tag)))
expected = len(world.css_find('.xmodule_VideoModule'))
assert actual == expected
@step('all sources are correct$')
def all_sources_are_correct(_step):
elements = world.css_find('.video-player video source')
sources = [source['src'].split('?')[0] for source in elements]
assert set(sources) == set(HTML5_SOURCES)
@step('error message is shown$')
def error_message_is_shown(_step):
selector = '.video .video-player h3'
assert world.css_visible(selector)
@step('error message has correct text$')
def error_message_has_correct_text(_step):
selector = '.video .video-player h3'
text = _('ERROR: No playable video sources found!')
assert world.css_has_text(selector, text)
@step('I make sure captions are (.+)$')
def set_captions_visibility_state(_step, captions_state):
SELECTOR = '.closed .subtitles'
if world.is_css_not_present(SELECTOR, wait_time=30):
if captions_state == 'closed':
world.css_click('.hide-subtitles')
else:
if captions_state != 'closed':
world.css_click('.hide-subtitles')
@step('I see video menu "([^"]*)" with correct items$')
def i_see_menu(_step, menu):
open_menu(menu)
menu_items = world.css_find(VIDEO_MENUS[menu] + ' li')
video = world.scenario_dict['VIDEO']
transcripts = dict(video.transcripts)
if video.sub:
transcripts.update({
'en': video.sub
})
languages = {i[0]: i[1] for i in LANGUAGES}
transcripts = {k: languages[k] for k in transcripts}
for code, label in transcripts.items():
assert any([i.text == label for i in menu_items])
assert any([i['data-lang-code'] == code for i in menu_items])
@step('I see "([^"]*)" text in the captions$')
def check_text_in_the_captions(_step, text):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'))
actual_text = world.css_text('.subtitles')
assert (text in actual_text)
@step('I see text in the captions:')
def check_captions(_step):
world.wait_for_present('.video.is-captions-rendered')
for index, video in enumerate(_step.hashes):
assert (video.get('text') in world.css_text('.subtitles', index=index))
@step('I select language with code "([^"]*)"$')
def select_language(_step, code):
world.wait_for_visible('.video-controls')
# Make sure that all ajax requests that affects the language menu are finished.
# For example, request to get new translation etc.
world.wait_for_ajax_complete()
selector = VIDEO_MENUS["language"] + ' li[data-lang-code="{code}"]'.format(
code=code
)
world.css_find(VIDEO_BUTTONS["CC"])[0].mouse_over()
world.wait_for_present('.lang.open')
world.css_click(selector)
assert world.css_has_class(selector, 'is-active')
assert len(world.css_find(VIDEO_MENUS["language"] + ' li.is-active')) == 1
# Make sure that all ajax requests that affects the display of captions are finished.
# For example, request to get new translation etc.
world.wait_for_ajax_complete()
world.wait_for_visible('.subtitles')
world.wait_for_present('.video.is-captions-rendered')
@step('I click video button "([^"]*)"$')
def click_button(_step, button):
world.css_click(VIDEO_BUTTONS[button])
if button == "play":
        # Need to wait for the video to finish buffering
world.wait_for(
func=lambda _: world.css_has_class('.video', 'is-playing') and world.is_css_present(VIDEO_BUTTONS['pause']),
timeout=30
)
world.wait_for_ajax_complete()
@step('I see video slider at "([^"]*)" position$')
def start_playing_video_from_n_seconds(_step, time_str):
position = parse_time_str(time_str)
actual_position = elapsed_time()
assert_equal(actual_position, int(position), "Current position is {}, but should be {}".format(actual_position, position))
@step('I see duration "([^"]*)"$')
def i_see_duration(_step, position):
world.wait_for(
func=lambda _: duration() > 0,
timeout=30
)
assert duration() == parse_time_str(position)
@step('I wait for video controls appear$')
def controls_appear(_step):
world.wait_for_visible('.video-controls')
@step('I seek video to "([^"]*)" position$')
def seek_video_to_n_seconds(_step, time_str):
time = parse_time_str(time_str)
jsCode = "$('.video').data('video-player-state').videoPlayer.onSlideSeek({{time: {0}}})".format(time)
world.browser.execute_script(jsCode)
world.wait_for(
func=lambda _: world.retry_on_exception(lambda: elapsed_time() == time and not world.css_has_class('.video', 'is-buffering')),
timeout=30
)
_step.given('I see video slider at "{0}" position'.format(time_str))
@step('I have a "([^"]*)" transcript file in assets$')
def upload_to_assets(_step, filename):
upload_file(filename, world.scenario_dict['COURSE'].location)
@step('menu "([^"]*)" doesn\'t exist$')
def is_hidden_menu(_step, menu):
assert world.is_css_not_present(VIDEO_MENUS[menu])
@step('I see video aligned correctly (with(?:out)?) enabled transcript$')
def video_alignment(_step, transcript_visibility):
# Width of the video container in css equal 75% of window if transcript enabled
wrapper_width = 75 if transcript_visibility == "with" else 100
initial = get_window_dimensions()
set_window_dimensions(300, 600)
real, expected = get_all_dimensions()
width = round(100 * real['width']/expected['width']) == wrapper_width
set_window_dimensions(600, 300)
real, expected = get_all_dimensions()
height = abs(expected['height'] - real['height']) <= 5
# Restore initial window size
set_window_dimensions(initial['width'], initial['height'])
assert all([width, height])
@step('I can download transcript in "([^"]*)" format that has text "([^"]*)"$')
def i_can_download_transcript(_step, format, text):
assert world.css_has_text('.video-tracks .a11y-menu-button', '.' + format, strip=True)
formats = {
'srt': 'application/x-subrip',
'txt': 'text/plain',
}
url = world.css_find(VIDEO_BUTTONS['download_transcript'])[0]['href']
request = RequestHandlerWithSessionId()
assert request.get(url).is_success()
assert request.check_header('content-type', formats[format])
assert (text.encode('utf-8') in request.content)
@step('I select the transcript format "([^"]*)"$')
def select_transcript_format(_step, format):
button_selector = '.video-tracks .a11y-menu-button'
menu_selector = VIDEO_MENUS['download_transcript']
button = world.css_find(button_selector).first
height = button._element.location_once_scrolled_into_view['y']
world.browser.driver.execute_script("window.scrollTo(0, {});".format(height))
button.mouse_over()
assert world.css_has_text(button_selector, '...', strip=True)
menu_items = world.css_find(menu_selector + ' a')
for item in menu_items:
if item['data-value'] == format:
item.click()
world.wait_for_ajax_complete()
break
world.browser.driver.execute_script("window.scrollTo(0, 0);")
assert world.css_find(menu_selector + ' .active a')[0]['data-value'] == format
assert world.css_has_text(button_selector, '.' + format, strip=True)
@step('video (.*) show the captions$')
def shows_captions(_step, show_captions):
if 'not' in show_captions or 'n\'t' in show_captions:
assert world.is_css_present('div.video.closed')
else:
assert world.is_css_not_present('div.video.closed')
@step('I click on caption line "([^"]*)", video module shows elapsed time "([^"]*)"$')
def click_on_the_caption(_step, index, expected_time):
world.wait_for_present('.video.is-captions-rendered')
find_caption_line_by_data_index(int(index)).click()
actual_time = elapsed_time()
assert int(expected_time) == actual_time
@step('button "([^"]*)" is (hidden|visible)$')
def is_hidden_button(_step, button, state):
selector = VIDEO_BUTTONS[button]
if state == 'hidden':
world.wait_for_invisible(selector)
assert_false(
world.css_visible(selector),
'Button {0} is invisible, but should be visible'.format(button)
)
else:
world.wait_for_visible(selector)
assert_true(
world.css_visible(selector),
'Button {0} is visible, but should be invisible'.format(button)
)
@step('button "([^"]*)" is (active|inactive)$')
def i_see_active_button(_step, button, state):
selector = VIDEO_BUTTONS[button]
if state == 'active':
assert world.css_has_class(selector, 'active')
else:
assert not world.css_has_class(selector, 'active')
| agpl-3.0 |
brightinteractive/cmsplugin-filer | cmsplugin_filer_file/migrations/0001_initial.py | 11 | 12020 |
from south.db import db
from django.db import models
from cmsplugin_filer_file.models import *
class Migration:
depends_on = (
("filer", "0008_polymorphic__del_field_file__file_type_plugin_name"),
("cms", "0039_auto__del_field_page_moderator_state"),
)
def forwards(self, orm):
# Adding model 'FilerFile'
db.create_table('cmsplugin_filerfile', (
('cmsplugin_ptr', orm['cmsplugin_filer_file.FilerFile:cmsplugin_ptr']),
('file', orm['cmsplugin_filer_file.FilerFile:file']),
('title', orm['cmsplugin_filer_file.FilerFile:title']),
))
db.send_create_signal('cmsplugin_filer_file', ['FilerFile'])
def backwards(self, orm):
# Deleting model 'FilerFile'
db.delete_table('cmsplugin_filerfile')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cmsplugin_filer_file.filerfile': {
'Meta': {'db_table': "'cmsplugin_filerfile'"},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_field': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_file']
| bsd-3-clause |
Murodese/pynab | pynab/requests.py | 1 | 2441 | import regex
from pynab import log
from pynab.db import db_session, Release, Pre, Group, windowed_query
import config
GROUP_ALIASES = {
# from: to
'alt.binaries.etc': 'alt.binaries.teevee',
}
GROUP_REQUEST_REGEXES = {
'alt.binaries.etc': '^(\d{4,8})$',
'alt.binaries.teevee': '^(\d{4,8})$',
'alt.binaries.moovee': '^(\d{4,8})$',
}
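# Illustrative example: a release named "123456" posted to alt.binaries.etc
# matches '^(\d{4,8})$' above and, via GROUP_ALIASES, is looked up against
# pres filed under alt.binaries.teevee.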
def process(limit=None):
"""Process releases for requests"""
with db_session() as db:
requests = {}
for group, reg in GROUP_REQUEST_REGEXES.items():
# noinspection PyComparisonWithNone
query = db.query(Release).join(Group).filter(Group.name==group).filter(Release.pre_id == None).\
filter(Release.category_id == '8010').filter("releases.name ~ '{}'".format(reg))
            matched = False
            for release in windowed_query(query, Release.id, config.scan.get('binary_process_chunk_size')):
                # check if it's aliased
                if release.group.name in GROUP_ALIASES:
                    group_name = GROUP_ALIASES[release.group.name]
                else:
                    group_name = release.group.name
                if group_name not in requests:
                    requests[group_name] = {}
                result = regex.search(reg, release.name)
                if result:
                    requests[group_name][result.group(0)] = release
                    matched = True
            if not matched:
                # the original for/else always ran because the loop never
                # breaks; only log when nothing matched for this group
                log.info("requests: no release requests to process")
# per-group
for group_name, group_requests in requests.items():
# query for the requestids
            if group_requests:
                pres = db.query(Pre).filter(Pre.requestgroup == group_name).filter(Pre.requestid.in_(group_requests.keys())).all()
else:
log.info("requests: no pre requests found")
pres = []
# loop through and associate pres with their requests
for pre in pres:
# no longer need to check group
updated_release = group_requests.get(str(pre.requestid))
updated_release.pre_id = pre.id
db.merge(updated_release)
log.info("requests: found pre request id {} ({}) for {}".format(pre.requestid, group_name,
updated_release.name))
db.commit() | gpl-2.0 |
gazpachoking/deluge-old | deluge/ui/console/commands/cache.py | 8 | 2020 | #
# cache.py
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from deluge.ui.console.main import BaseCommand
from deluge.ui.client import client
import deluge.ui.console.colors as colors
import deluge.component as component
class Command(BaseCommand):
"""Show information about the disk cache"""
usage = "Usage: cache"
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
def on_cache_status(status):
for key, value in status.items():
self.console.write("{!info!}%s: {!input!}%s" % (key, value))
d = client.core.get_cache_status()
d.addCallback(on_cache_status)
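        # Returning the Deferred lets the console wait for the RPC round-trip
        # to finish before handing control back to the prompt.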
return d
| gpl-3.0 |
ryfeus/lambda-packs | Spacy/source2.7/spacy/lang/sv/lemmatizer/__init__.py | 3 | 2312 | # coding: utf8
from __future__ import unicode_literals
from .lookup import LOOKUP
LEMMA_RULES = {
"noun": [
["t", ""],
["n", ""],
["na", ""],
["na", "e"],
["or", "a"],
["orna", "a"],
["et", ""],
["en", ""],
["en", "e"],
["er", ""],
["erna", ""],
["ar", "e"],
["ar", ""],
["lar", "el"],
["arna", "e"],
["arna", ""],
["larna", "el"]
],
"verb": [
["r", ""],
["de", ""],
["t", ""],
["er", ""],
["te", ""],
["a", ""],
["e", ""],
["t", "d"],
["tt", "d"],
["tt", ""],
["ev", "iv"],
["ack", "ick"],
["ög", "yg"],
["it", ""],
["uckit", "ick"],
["ugit", "yg"],
["it", "et"],
["id", "ed"],
["ip", "ep"],
["iv", "ev"],
["in", "en"],
["ik", "ek"],
["ig", "eg"],
["ind", ""],
["inn", "ann"],
["nder", "nd"],
["inner", "inn"],
["and", "ind"],
["ann", "inn"],
["s", ""],
["anns", "inn"],
["undit", "ind"],
["unnit", "inn"],
["unnits", "inn"],
["uppit", "ipp"],
["ungit", "ing"],
["öd", "ud"],
["öt", "jut"],
["öt", "ut"],
["ög", "ug"],
["ögg", "ugg"],
["öng", "ung"],
["önk", "unk"],
["öt", "yt"],
["utit", "yt"],
["ös", "ys"],
["öv", "yv"],
["uvit", "yv"],
["öp", "yp"],
["upit", "yp"],
["ök", "yk"],
["ukit", "yk"],
["or", "ar"],
["öll", "all"],
["ät", "åt"],
["öll", "åll"],
["or", "är"],
["urit", "är"],
["åt", "ät"],
["ar", "är"],
["alt", "ält"],
["ultit", "ält"]
],
"adj": [
["are", ""],
["ast", ""],
["re", ""],
["st", ""],
["ägre", "åg"],
["ägst", "åg"],
["ängre", "ång"],
["ängst", "ång"],
["örre", "or"],
["örst", "or"]
],
"punct": [
["“", "\""],
["”", "\""],
["\u2018", "'"],
["\u2019", "'"]
]
}
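# Minimal sketch (not part of spaCy itself) of how a [suffix, replacement]
# rule table like the one above can be applied to a surface form:
def _apply_lemma_rules(string, rules):
    """Return candidate lemmas produced by stripping/replacing rule suffixes."""
    forms = []
    for old, new in rules:
        if string.endswith(old):
            form = string[:len(string) - len(old)] + new
            if form and form not in forms:
                forms.append(form)
    return forms
# e.g. _apply_lemma_rules("flickor", LEMMA_RULES["noun"]) yields ["flicka"].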
| mit |
ARG-TLQ/Red-DiscordBot | cogs/tempchannels/tempchannels.py | 1 | 29365 | """Temporary channel cog.
Creates a temporary channel.
"""
from copy import deepcopy
from datetime import datetime
import logging
import time
import asyncio
import discord
from discord.ext import commands
from redbot.core import Config, checks, commands, data_manager
from redbot.core.bot import Red
from redbot.core.commands.context import Context
from .constants import *
class TempChannels(commands.Cog):
"""Creates a temporary channel."""
def __init__(self, bot: Red):
self.bot = bot
self.config = Config.get_conf(self, identifier=5842647, force_registration=True)
self.config.register_guild(**DEFAULT_GUILD)
# Initialize logger, and save to cog folder.
saveFolder = data_manager.cog_data_path(cog_instance=self)
self.logger = logging.getLogger("red.luicogs.TempChannels")
if self.logger.level == 0:
# Prevents the self.logger from being loaded again in case of module reload.
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(
filename=str(saveFolder) + "/info.log", encoding="utf-8", mode="a"
)
handler.setFormatter(
logging.Formatter("%(asctime)s %(message)s", datefmt="[%d/%m/%Y %H:%M:%S]")
)
self.logger.addHandler(handler)
self.bgTask = self.bot.loop.create_task(self.checkChannels())
# Cancel the background task on cog unload.
def __unload(self): # pylint: disable=invalid-name
self.bgTask.cancel()
def cog_unload(self):
self.__unload()
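    # NOTE: legacy v2-style helper; it references a put/get interface and a
    # `self.settings` attribute that the v3 Config object above does not
    # provide, and nothing in this cog appears to call it.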
async def _syncSettings(self):
"""Force settings to file and reload."""
await self.config.put(KEY_SETTINGS, self.settings)
self.settings = self.config.get(KEY_SETTINGS)
@commands.group(name="tempchannels", aliases=["tc"])
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
async def tempChannels(self, ctx: Context):
"""Temporary text-channel creation (only 1 at the moment)."""
@tempChannels.command(name="show")
async def tempChannelsShow(self, ctx: Context):
"""Show current settings."""
tempCh = await self.config.guild(ctx.guild).all()
rolesAllow = [discord.utils.get(ctx.guild.roles, id=rid) for rid in tempCh[KEY_ROLE_ALLOW]]
rolesAllow = [roleName.name for roleName in rolesAllow if roleName]
rolesDeny = [discord.utils.get(ctx.guild.roles, id=rid) for rid in tempCh[KEY_ROLE_DENY]]
rolesDeny = [roleName.name for roleName in rolesDeny if roleName]
categoryName = discord.utils.get(ctx.guild.channels, id=tempCh[KEY_CH_CATEGORY])
msg = (
":information_source: TempChannel - Current Settings\n```"
"Enabled? {}\n"
"Archive after? {}\n"
"NSFW Prompt: {}\n"
"Roles Allowed: {}\n"
"Roles Denied: {}\n"
"Ch. Name: #{}\n"
"Ch. Topic: {}\n"
"Ch. Position: {}\n"
"Ch. Category: {}\n"
"Creation Time: {:002d}:{:002d}\n"
"Duration: {}h {}m"
"```".format(
"Yes" if tempCh[KEY_ENABLED] else "No",
"Yes" if tempCh[KEY_ARCHIVE] else "No",
"Yes" if tempCh[KEY_NSFW] else "No",
rolesAllow,
rolesDeny,
tempCh[KEY_CH_NAME],
tempCh[KEY_CH_TOPIC],
tempCh[KEY_CH_POS],
"{}".format(categoryName) if categoryName else "(not set)",
tempCh[KEY_START_HOUR],
tempCh[KEY_START_MIN],
tempCh[KEY_DURATION_HOURS],
tempCh[KEY_DURATION_MINS],
)
)
await ctx.send(msg)
@tempChannels.command(name="archive")
@checks.admin()
async def tempChannelsArchive(self, ctx: Context):
"""Toggle archiving the channel after the fact."""
async with self.config.guild(ctx.guild).all() as guildData:
if guildData[KEY_ARCHIVE]:
guildData[KEY_ARCHIVE] = False
self.logger.info(
"%s (%s) DISABLED archiving the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":negative_squared_cross_mark: TempChannel: Archiving disabled. "
" The channel will be deleted after its lifetime expires."
)
else:
guildData[KEY_ARCHIVE] = True
self.logger.info(
"%s (%s) ENABLED archiving the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel: Archiving enabled. The channel "
"will have ALL user permissions revoked after its lifetime "
"expires, and will be renamed with the date and time that it "
"was archived."
)
@tempChannels.command(name="toggle")
async def tempChannelsToggle(self, ctx: Context):
"""Toggle the creation/deletion of the temporary channel."""
async with self.config.guild(ctx.guild).all() as guildData:
if guildData[KEY_ENABLED]:
guildData[KEY_ENABLED] = False
self.logger.info(
"%s (%s) DISABLED the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":negative_squared_cross_mark: TempChannel: Disabled.")
else:
guildData[KEY_ENABLED] = True
self.logger.info(
"%s (%s) ENABLED the temp channel for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":white_check_mark: TempChannel: Enabled.")
@tempChannels.command(name="nsfw")
async def tempChannelsNSFW(self, ctx: Context):
"""Toggle NSFW requirements."""
async with self.config.guild(ctx.guild).all() as guildData:
            if guildData[KEY_NSFW]:
                guildData[KEY_NSFW] = False
                self.logger.info(
                    "%s (%s) DISABLED the NSFW prompt for %s (%s)",
                    ctx.author.name,
                    ctx.author.id,
                    ctx.guild.name,
                    ctx.guild.id,
                )
await ctx.send(
":negative_squared_cross_mark: TempChannel: NSFW " "requirement disabled."
)
else:
guildData[KEY_NSFW] = True
self.logger.info(
"%s (%s) ENABLED the NSFW prompt for %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":white_check_mark: TempChannel: NSFW " "requirement enabled.")
@tempChannels.command(name="start")
async def tempChannelsStart(self, ctx: Context, hour: int, minute: int):
"""Set the temp channel creation time. Use 24 hour time.
Parameters:
-----------
hour: int
The hour to start the temporary channel.
minute: int
The minute to start the temporary channel.
"""
if (hour > 23) or (hour < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Start "
"Time: Please enter a valid time."
)
return
if (minute > 59) or (minute < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Start "
"Time: Please enter a valid time."
)
return
async with self.config.guild(ctx.guild).all() as guildData:
guildData[KEY_START_HOUR] = hour
guildData[KEY_START_MIN] = minute
self.logger.info(
"%s (%s) set the start time to %002d:%002d on %s (%s)",
ctx.author.name,
ctx.author.id,
hour,
minute,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Start Time: Start time "
"set to {0:002d}:{1:002d}.".format(hour, minute)
)
@tempChannels.command(name="duration")
async def tempChannelsDuration(self, ctx: Context, hours: int, minutes: int):
"""Set the duration of the temp channel. Max 100 hours.
Parameters:
-----------
hours: int
Number of hours to make this channel available.
minutes: int
Number of minutes to make this channel available.
Example:
If hours = 1, and minutes = 3, then the channel will be available for
1 hour 3 minutes.
"""
if (hours >= 100) or (hours < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Duration: "
"Please enter valid hours!"
)
return
if (minutes >= 60) or (minutes < 0):
await ctx.send(
":negative_squared_cross_mark: TempChannel - Duration: "
"Please enter valid minutes!"
)
return
async with self.config.guild(ctx.guild).all() as guildData:
guildData[KEY_DURATION_HOURS] = hours
guildData[KEY_DURATION_MINS] = minutes
self.logger.info(
"%s (%s) set the duration to %s hours, %s minutes on %s (%s)",
ctx.author.name,
ctx.author.id,
hours,
minutes,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Duration: Duration set to "
"**{0} hours, {1} minutes**.".format(hours, minutes)
)
@tempChannels.command(name="topic")
async def tempChannelsTopic(self, ctx: Context, *, topic: str):
"""Set the topic of the channel.
Parameters:
-----------
topic: str
The topic of the channel.
"""
if len(topic) > MAX_CH_TOPIC:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Topic: "
"Topic is too long. Try again."
)
return
await self.config.guild(ctx.guild).channelTopic.set(topic)
self.logger.info(
"%s (%s) set the channel topic to the following on %s (%s): %s",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
topic,
)
await ctx.send(
":white_check_mark: TempChannel - Topic: Topic set to:\n" "```{0}```".format(topic)
)
@tempChannels.command(name="name")
async def tempChannelsName(self, ctx, name: str):
"""Set the #name of the channel.
Parameters:
-----------
name: str
The #name of the channel, which is shown on the left panel of Discord.
"""
if len(name) > MAX_CH_NAME:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Name: "
"Name is too long. Try again."
)
return
await self.config.guild(ctx.guild).channelName.set(name)
self.logger.info(
"%s (%s) set the channel name to " "%s" " on %s (%s)",
ctx.author.name,
ctx.author.id,
name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Name: Channel name set " "to: ``{0}``".format(name)
)
@tempChannels.command(name="position", aliases=["pos"])
async def tempChannelsPosition(self, ctx, position: int):
"""Set the position of the text channel in the list.
Parameters:
-----------
position: int
The position where you want the temp channel to appear on the channel
list.
"""
if position > MAX_CH_POS or position < 0:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Position: "
"Invalid position. Try again."
)
return
await self.config.guild(ctx.guild).channelPosition.set(position)
self.logger.info(
"%s (%s) changed the position to %s on %s (%s)",
ctx.author.name,
ctx.author.id,
position,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Position: This channel "
"will be at position {0}".format(position)
)
@tempChannels.command(name="category", pass_context=True, no_pm=True)
async def tempChannelsCategory(
self, ctx: Context, *, category: discord.CategoryChannel = None
):
"""Set the parent category of the text channel.
Parameters:
-----------
category: discord.CategoryChannel
The category you wish to nest the temporary channel under.
"""
        # Guard against a cleared category before dereferencing its ID.
        await self.config.guild(ctx.guild).channelCategory.set(category.id if category else None)
        if not category:
self.logger.info(
"%s (%s) disabled category nesting on %s (%s)",
ctx.author.name,
ctx.author.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Category: Parent " "category disabled."
)
else:
self.logger.info(
"%s (%s) set the parent category ID to %s on %s (%s)",
ctx.author.name,
ctx.author.id,
category.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Category: Parent "
"category set to **{}**.".format(category.name)
)
@tempChannels.command(name="allowadd", aliases=["aa"])
async def tempChannelsAllowAdd(self, ctx: Context, *, role: discord.Role):
"""Add a role to allow access to the channel.
Parameters:
-----------
role: discord.Role
The role you wish to allow access to the temporary channel.
"""
async with self.config.guild(ctx.guild).roleAllow() as roleAllow:
if role.id not in roleAllow:
roleAllow.append(role.id)
self.logger.info(
"%s (%s) added role %s to the allow list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role Allow: **`{0}`"
"** will be allowed access.".format(role.name)
)
else:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Allow: "
"**`{0}`** is already allowed.".format(role.name)
)
@tempChannels.command(name="allowremove", aliases=["allowdelete", "ad", "ar"])
async def tempChannelsAllowRemove(self, ctx: Context, *, role: discord.Role):
"""Remove a role from being able access the temporary channel.
Parameters:
-----------
role: discord.Role
The role you wish to remove access from.
"""
async with self.config.guild(ctx.guild).roleAllow() as roleAllow:
if not roleAllow or role.id not in roleAllow:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Allow: "
"**`{0}`** wasn't on the list.".format(role.name)
)
else:
roleAllow.remove(role.id)
self.logger.info(
"%s (%s) removed role %s from the allow list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role Allow: **`{0}`** "
"removed from the list.".format(role.name)
)
@tempChannels.command(name="denyadd", aliases=["da"])
async def tempChannelsDenyAdd(self, ctx: Context, *, role: discord.Role):
"""Add a role to block sending message to the channel.
This role should be HIGHER in the role hierarchy than the roles in
the allowed list! The bot will not check for this.
Parameters:
-----------
role: discord.Role
The role you wish to deny sending permissions in the temporary channel.
"""
async with self.config.guild(ctx.guild).roleDeny() as roleDeny:
if role.id not in roleDeny:
roleDeny.append(role.id)
self.logger.info(
"%s (%s) added role %s to the deny list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role: **`{0}`** will "
"be denied sending, provided this role is higher "
"than any of the ones in the allowed list.".format(role.name)
)
else:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Deny: "
"**`{0}`** is already denied.".format(role)
)
@tempChannels.command(name="denyremove", aliases=["denydelete", "dd", "dr"])
async def tempChannelsDenyRemove(self, ctx: Context, *, role: discord.Role):
"""Remove role from being blocked sending to the channel.
Parameters:
-----------
role: discord.Role
The role you wish to remove from the deny list.
"""
async with self.config.guild(ctx.guild).roleDeny() as roleDeny:
if not roleDeny or role.id not in roleDeny:
await ctx.send(
":negative_squared_cross_mark: TempChannel - Role Deny: "
"**`{0}`** wasn't on the list.".format(role.name)
)
else:
roleDeny.remove(role.id)
self.logger.info(
"%s (%s) removed role %s from the deny list on %s (%s)",
ctx.author.name,
ctx.author.id,
role.name,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(
":white_check_mark: TempChannel - Role Deny: **`{0}`** "
"removed from the list.".format(role.name)
)
@tempChannels.command(name="delete", aliases=["remove", "del", "rm"])
async def tempChannelsDelete(self, ctx: Context):
"""Deletes the temp channel, if it exists."""
async with self.config.guild(ctx.guild).all() as guildData:
if guildData[KEY_CH_CREATED] and guildData[KEY_CH_ID]:
# Channel created, see when we should delete it.
try:
chanObj = self.bot.get_channel(guildData[KEY_CH_ID])
await chanObj.delete()
except discord.DiscordException:
self.logger.error("Could not delete channel!", exc_info=True)
await ctx.send(
":warning: TempChannel: Something went wrong "
"while trying to delete the channel. Please "
"check the console log for details."
)
else:
guildData[KEY_CH_ID] = None
guildData[KEY_CH_CREATED] = False
self.logger.info(
"%s (%s) deleted the temp channel #%s (%s) in %s (%s).",
ctx.author.name,
ctx.author.id,
chanObj.name,
chanObj.id,
ctx.guild.name,
ctx.guild.id,
)
await ctx.send(":white_check_mark: TempChannel: Channel deleted")
else:
await ctx.send(
":negative_squared_cross_mark: TempChannel: There is no "
"temporary channel to delete!"
)
###################
# Background Loop #
###################
async def checkChannels(self): # pylint: disable=too-many-branches,too-many-statements
"""Loop to check whether or not we should create/delete the
TempChannel."""
while self == self.bot.get_cog("TempChannels"):
await asyncio.sleep(SLEEP_TIME)
# Create/maintain the channel during a valid time and duration, else
# delete it.
for guild in self.bot.guilds:
async with self.config.guild(guild).all() as guildData:
try:
if not guildData[KEY_ENABLED]:
continue
if (
int(time.strftime("%H")) == guildData[KEY_START_HOUR]
and int(time.strftime("%M")) == guildData[KEY_START_MIN]
and not guildData[KEY_CH_CREATED]
and not guildData[KEY_CH_ID]
):
# See if ALL of the following is satisfied.
# - It is the starting time.
# - The channel creation flag is not set.
# - The channel ID doesn't exist.
#
# If it is satisfied, let's create a channel, and then
# store the following in the settings:
# - Channel ID.
# - Time to delete channel.
# Start with permissions
# Always allow the bot to read.
permsDict = {self.bot.user: PERMS_READ_Y}
if guildData[KEY_ROLE_ALLOW]:
# If we have allow roles, automatically deny @everyone the "Read
# Messages" permission.
permsDict[guild.default_role] = PERMS_READ_N
for roleId in guildData[KEY_ROLE_ALLOW]:
role = discord.utils.get(guild.roles, id=roleId)
self.logger.debug("Allowed role %s", role)
if role:
permsDict[role] = deepcopy(PERMS_READ_Y)
# Check for deny permissions.
if guildData[KEY_ROLE_DENY]:
for roleId in guildData[KEY_ROLE_DENY]:
role = discord.utils.get(guild.roles, id=roleId)
self.logger.debug("Denied role %s", role)
if role and role not in permsDict.keys():
self.logger.debug("Role not in dict, adding")
permsDict[role] = deepcopy(PERMS_SEND_N)
elif role:
self.logger.debug("Updating role")
permsDict[role].update(send_messages=False)
self.logger.debug("Current permission overrides: \n%s", permsDict)
# Grab parent category. If not set, this will return None anyways.
category = None
if guildData[KEY_CH_CATEGORY]:
category = discord.utils.get(
guild.channels, id=guildData[KEY_CH_CATEGORY]
)
chanObj = await guild.create_text_channel(
guildData[KEY_CH_NAME],
overwrites=permsDict,
category=category,
position=guildData[KEY_CH_POS],
topic=guildData[KEY_CH_TOPIC],
nsfw=guildData[KEY_NSFW],
)
self.logger.info(
"Channel #%s (%s) in %s (%s) was created.",
chanObj.name,
chanObj.id,
guild.name,
guild.id,
)
guildData[KEY_CH_ID] = chanObj.id
# Set delete times, and save settings.
duration = (
guildData[KEY_DURATION_HOURS] * 60 * 60
+ guildData[KEY_DURATION_MINS] * 60
)
guildData[KEY_STOP_TIME] = time.time() + duration
guildData[KEY_CH_CREATED] = True
elif guildData[KEY_CH_CREATED]:
# Channel created, see when we should delete it.
if time.time() >= guildData[KEY_STOP_TIME]:
self.logger.debug(
"Past channel stop time, clearing ID " "and created keys."
)
chanObj = guild.get_channel(guildData[KEY_CH_ID])
guildData[KEY_CH_ID] = None
guildData[KEY_CH_CREATED] = False
if chanObj and guildData[KEY_ARCHIVE]:
await chanObj.set_permissions(
guild.default_role, overwrite=PERMS_READ_N
)
for role in guild.roles:
if role == guild.default_role:
continue
await chanObj.set_permissions(
role, overwrite=None, reason="Archiving tempchannel"
)
currentDate = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
await chanObj.edit(name=f"tc-{currentDate}")
self.logger.info(
"Channel #%s (%s) in %s (%s) was archived.",
chanObj.name,
chanObj.id,
guild.name,
guild.id,
)
elif chanObj and not guildData[KEY_ARCHIVE]:
await chanObj.delete()
self.logger.info(
"Channel #%s (%s) in %s (%s) was deleted.",
chanObj.name,
chanObj.id,
guild.name,
guild.id,
)
except Exception: # pylint: disable=broad-except
self.logger.error(
"Something went terribly wrong for server %s (%s)!",
guild.name,
guild.id,
exc_info=True,
)
| gpl-3.0 |
Laurawly/tvm-1 | tests/python/contrib/test_arm_compute_lib/test_network.py | 1 | 5647 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library network tests."""
import numpy as np
import pytest
from tvm import testing
from tvm import relay
from test_arm_compute_lib.infrastructure import skip_runtime_test, build_and_run, verify
from test_arm_compute_lib.infrastructure import Device
def _build_and_run_network(mod, params, inputs, device, tvm_ops, acl_partitions, atol, rtol):
"""Helper function to build and run a network."""
data = {}
np.random.seed(0)
for name, (shape, dtype) in inputs.items():
if dtype == "uint8":
low, high = 0, 255
else:
low, high = -127, 128
data[name] = np.random.uniform(low, high, shape).astype(dtype)
outputs = []
for acl in [False, True]:
outputs.append(
build_and_run(
mod,
data,
1,
params,
device,
enable_acl=acl,
tvm_ops=tvm_ops,
acl_partitions=acl_partitions,
)[0]
)
verify(outputs, atol=atol, rtol=rtol, verify_saturation=False)
def _get_tflite_model(tflite_model_path, inputs_dict):
"""Convert TFlite graph to relay."""
import tflite.Model
with open(tflite_model_path, "rb") as f:
tflite_model_buffer = f.read()
try:
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buffer, 0)
except AttributeError:
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buffer, 0)
shape_dict = {}
dtype_dict = {}
for input_name in inputs_dict:
input_shape, input_dtype = inputs_dict[input_name]
shape_dict[input_name] = input_shape
dtype_dict[input_name] = input_dtype
return relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict)
def _get_keras_model(keras_model, inputs_dict):
"""Convert Keras graph to relay."""
inputs = {}
for name, (shape, _) in inputs_dict.items():
inputs[name] = shape
return relay.frontend.from_keras(keras_model, inputs, layout="NHWC")
def test_vgg16():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
def get_model():
from keras.applications import VGG16
vgg16 = VGG16(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
inputs = {vgg16.input_names[0]: ((1, 224, 224, 3), "float32")}
mod, params = _get_keras_model(vgg16, inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(), device=device, tvm_ops=4, acl_partitions=21, atol=0.002, rtol=0.01
)
def test_mobilenet():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
def get_model():
from keras.applications import MobileNet
mobilenet = MobileNet(
include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
)
inputs = {mobilenet.input_names[0]: ((1, 224, 224, 3), "float32")}
mod, params = _get_keras_model(mobilenet, inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(), device=device, tvm_ops=56, acl_partitions=31, atol=0.002, rtol=0.01
)
def test_quantized_mobilenet():
Device.load("test_config.json")
if skip_runtime_test():
return
import tvm.relay.testing.tf as tf_testing
device = Device()
def get_model():
model_path = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/"
"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
inputs = {"input": ((1, 224, 224, 3), "uint8")}
mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(), device=device, tvm_ops=3, acl_partitions=30, atol=9, rtol=0
)
def test_squeezenet():
Device.load("test_config.json")
if skip_runtime_test():
return
import tvm.relay.testing.tf as tf_testing
device = Device()
def get_model():
model_path = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
"squeezenet.tflite",
)
inputs = {"Placeholder": ((1, 224, 224, 3), "float32")}
mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(), device=device, tvm_ops=9, acl_partitions=31, atol=8, rtol=0
)
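# Illustrative note (not part of the original file): each test can also be
# run individually via pytest, e.g. `pytest test_network.py::test_mobilenet`,
# assuming test_config.json points at a reachable Arm device.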
if __name__ == "__main__":
test_vgg16()
test_mobilenet()
test_quantized_mobilenet()
test_squeezenet()
| apache-2.0 |
ishanic/scikit-learn | sklearn/covariance/shrunk_covariance_.py | 209 | 18133 | """
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance
from ..externals.six.moves import xrange
from ..utils import check_array
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
emp_cov : array-like, shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Returns
-------
shrunk_cov : array-like
Shrunk covariance.
Notes
-----
The regularized (shrunk) covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
emp_cov = check_array(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
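# Illustrative check of the formula above (a sketch, not part of the
# original module; the numbers are made up):
#
#     >>> emp = np.array([[2., .5], [.5, 1.]])
#     >>> mu = np.trace(emp) / 2
#     >>> np.allclose(shrunk_covariance(emp, 0.1),
#     ...             0.9 * emp + 0.1 * mu * np.identity(2))
#     True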
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : boolean, default True
Specify if the estimated precision is stored
shrinkage : float, 0 <= shrinkage <= 1, default 0.1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage` : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
def __init__(self, store_precision=True, assume_centered=False,
shrinkage=0.1):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
""" Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not
exactly, zero.
If False, data are centered before computation.
block_size : int
Size of the blocks into which the covariance matrix will be split.
Returns
-------
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# optionally center data
if not assume_centered:
X = X - X.mean(0)
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
for i in xrange(n_splits):
for j in xrange(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in xrange(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:],
X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
beta = min(beta, delta)
# finally get shrinkage
shrinkage = 0 if beta == 0 else beta / delta
return shrinkage
def ledoit_wolf(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : boolean, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not
exactly, zero.
If False, data are centered before computation.
block_size : int, default=1000
Size of the blocks into which the covariance matrix will be split.
This is purely a memory optimization and does not affect results.
Returns
-------
shrunk_cov : array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
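# Minimal usage sketch (illustrative, not part of the original module;
# assumes standard-normal toy data):
#
#     >>> rng = np.random.RandomState(0)
#     >>> cov, shrinkage = ledoit_wolf(rng.randn(50, 5))
#     >>> cov.shape, 0. <= shrinkage <= 1.
#     ((5, 5), True)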
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
block_size : int, default=1000
Size of the blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation. This is purely a memory
optimization and does not affect results.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
"""
def __init__(self, store_precision=True, assume_centered=False,
block_size=1000):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
""" Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True,
block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
# OAS estimator
def oas(X, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
assume_centered : boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not
exactly, zero.
If False, data are centered before computation.
Returns
-------
shrunk_cov : array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS
does not correspond to the one given in the article. It has been taken
from the MATLAB program available from the author's webpage
(https://tbayes.eecs.umich.edu/yilun/covestimation).
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = 1. if den == 0 else min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
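# Minimal usage sketch (illustrative, not part of the original module):
#
#     >>> rng = np.random.RandomState(0)
#     >>> cov, shrinkage = oas(rng.randn(40, 3))
#     >>> cov.shape
#     (3, 3)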
class OAS(EmpiricalCovariance):
"""Oracle Approximating Shrinkage Estimator
Read more in the :ref:`User Guide <shrunk_covariance>`.
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. It has been taken from the Matlab program available from the
authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation).
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
""" Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
| bsd-3-clause |
mbauskar/Das_frappe | frappe/website/doctype/web_page/web_page.py | 27 | 6079 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re, os, json, imp
import requests, requests.exceptions
from frappe.website.website_generator import WebsiteGenerator
from frappe.website.router import resolve_route
from frappe.website.doctype.website_slideshow.website_slideshow import get_slideshow
from frappe.website.utils import find_first_image, get_comment_list
from markdown2 import markdown
from frappe.utils.jinja import render_template
from jinja2.exceptions import TemplateSyntaxError
from frappe.utils import strip_html
class WebPage(WebsiteGenerator):
save_versions = True
website = frappe._dict(
template = "templates/generators/web_page.html",
condition_field = "published",
page_title_field = "title",
parent_website_route_field = "parent_web_page"
)
def get_feed(self):
return self.title
def validate(self):
if self.template_path and not getattr(self, "from_website_sync"):
frappe.throw(frappe._("Cannot edit templated page"))
super(WebPage, self).validate()
def get_context(self, context):
# if static page, get static content
if context.slideshow:
context.update(get_slideshow(self))
if self.enable_comments:
context.comment_list = get_comment_list(self.doctype, self.name)
# for sidebar and breadcrumbs
context.children = self.get_children()
context.parents = self.get_parents(context)
if self.template_path:
# render dynamic context (if .py file exists)
# get absolute template path considering first fragment as app name
split_path = self.template_path.split(os.sep)
self.template_path = os.path.join(frappe.get_app_path(split_path[0]), *split_path[1:])
context = self.get_dynamic_context(frappe._dict(context))
# load content from template
self.get_static_content(context)
else:
context.update({
"style": self.css or "",
"script": self.javascript or "",
"header": self.header,
"title": self.title,
"text_align": self.text_align,
})
if self.description:
context.setdefault("metatags", {})["description"] = self.description
if not self.show_title:
context["no_header"] = 1
self.set_metatags(context)
return context
def render_dynamic(self, context):
# dynamic
is_jinja = "<!-- jinja -->" in context.main_section
if is_jinja or ("{{" in context.main_section):
try:
context["main_section"] = render_template(context.main_section,
context)
if not "<!-- static -->" in context.main_section:
context["no_cache"] = 1
except TemplateSyntaxError:
if is_jinja:
raise
def get_static_content(self, context):
with open(self.template_path, "r") as contentfile:
content = unicode(contentfile.read(), 'utf-8')
if self.template_path.endswith(".md"):
if content:
lines = content.splitlines()
first_line = lines[0].strip()
if first_line.startswith("# "):
context.title = first_line[2:]
content = "\n".join(lines[1:])
content = markdown(content)
context.main_section = unicode(content.encode("utf-8"), 'utf-8')
self.check_for_redirect(context)
if not context.title:
context.title = self.name.replace("-", " ").replace("_", " ").title()
self.render_dynamic(context)
for extn in ("js", "css"):
fpath = self.template_path.rsplit(".", 1)[0] + "." + extn
if os.path.exists(fpath):
with open(fpath, "r") as f:
context["style" if extn=="css" else "script"] = f.read()
def check_for_redirect(self, context):
if "<!-- redirect:" in context.main_section:
frappe.local.flags.redirect_location = \
context.main_section.split("<!-- redirect:")[1].split("-->")[0].strip()
raise frappe.Redirect
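# Illustrative note (not part of the original module): a template containing
# "<!-- redirect: /new-location -->" makes check_for_redirect set
# frappe.local.flags.redirect_location to "/new-location" and raise
# frappe.Redirect.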
def get_dynamic_context(self, context):
"update context from `.py` and load sidebar from `_sidebar.json` if either exists"
basename = os.path.basename(self.template_path).rsplit(".", 1)[0]
module_path = os.path.join(os.path.dirname(self.template_path),
frappe.scrub(basename) + ".py")
if os.path.exists(module_path):
module = imp.load_source(basename, module_path)
if hasattr(module, "get_context"):
ret = module.get_context(context)
if ret:
context = ret
# sidebar?
sidebar_path = os.path.join(os.path.dirname(self.template_path), "_sidebar.json")
if os.path.exists(sidebar_path):
with open(sidebar_path, "r") as f:
context.children = json.loads(f.read())
return context
def set_metatags(self, context):
context.metatags = {
"name": context.title,
"description": (context.description or context.main_section or "").replace("\n", " ")[:500]
}
image = find_first_image(context.main_section or "")
if image:
context.metatags["image"] = image
# def get_list_context(context=None):
# list_context = frappe._dict(
# title = _("Website Search"),
# template = "templates/includes/kb_list.html",
# row_template = "templates/includes/kb_row.html",
# get_level_class = get_level_class,
# hide_filters = True,
# filters = {"published": 1}
# )
#
# if frappe.local.form_dict.txt:
# list_context.subtitle = _('Filtered by "{0}"').format(frappe.local.form_dict.txt)
# #
# # list_context.update(frappe.get_doc("Blog Settings", "Blog Settings").as_dict())
# return list_context
def check_broken_links():
cnt = 0
for p in frappe.db.sql("select name, main_section from `tabWeb Page`", as_dict=True):
for link in re.findall('href=["\']([^"\']*)["\']', p.main_section):
if link.startswith("http"):
try:
res = requests.get(link)
except requests.exceptions.SSLError:
res = frappe._dict({"status_code": "SSL Error"})
except requests.exceptions.ConnectionError:
res = frappe._dict({"status_code": "Connection Error"})
if res.status_code!=200:
print "[{0}] {1}: {2}".format(res.status_code, p.name, link)
cnt += 1
else:
link = link[1:] # remove leading /
link = link.split("#")[0]
if not resolve_route(link):
print p.name + ":" + link
cnt += 1
print "{0} links broken".format(cnt)
| mit |
thiagoss/splitencoder | splitencoder/utils.py | 1 | 1149 | # splitencoder
#
# Copyright (C) 2015 Samsung Electronics. All rights reserved.
# Author: Thiago Santos <thiagoss@osg.samsung.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
def caps_is_video(caps):
s = caps.get_structure(0)
return s.get_name().startswith('video/')
def caps_is_audio(caps):
s = caps.get_structure(0)
return s.get_name().startswith('audio/')
| lgpl-2.1 |
FireballDWF/cloud-custodian | tools/c7n_guardian/c7n_guardian/cli.py | 5 | 14322 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import operator
import boto3
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
import click
from tabulate import tabulate
from c7n.credentials import assumed_session, SessionFactory
from c7n.utils import format_event, chunks
from c7n_org.cli import init, filter_accounts, CONFIG_SCHEMA, WORKER_COUNT
log = logging.getLogger('c7n-guardian')
# make email required in org schema
CONFIG_SCHEMA['definitions']['account']['properties']['email'] = {'type': 'string'}
for el in CONFIG_SCHEMA['definitions']['account']['anyOf']:
el['required'].append('email')
@click.group()
def cli():
"""Automate Guard Duty Setup."""
@cli.command()
@click.option('-c', '--config',
required=True, help="Accounts config file", type=click.Path())
@click.option('-t', '--tags', multiple=True, default=None)
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('--master', help='Master account id or name')
@click.option('--debug', help='Run single-threaded', is_flag=True)
@click.option('--region', default='us-east-1')
def report(config, tags, accounts, master, debug, region):
"""report on guard duty enablement by account"""
accounts_config, master_info, executor = guardian_init(
config, debug, master, accounts, tags)
session = get_session(
master_info.get('role'), 'c7n-guardian',
master_info.get('profile'),
region)
client = session.client('guardduty')
detector_id = get_or_create_detector_id(client)
members = {m['AccountId']: m for m in
client.list_members(DetectorId=detector_id).get('Members')}
accounts_report = []
for a in accounts_config['accounts']:
ar = dict(a)
accounts_report.append(ar)
ar.pop('tags', None)
ar.pop('role')
ar.pop('regions', None)
if a['account_id'] not in members:
ar['member'] = False
ar['status'] = None
ar['invited'] = None
ar['updated'] = datetime.datetime.now().isoformat()
continue
m = members[a['account_id']]
ar['status'] = m['RelationshipStatus']
ar['member'] = True
ar['joined'] = m['InvitedAt']
ar['updated'] = m['UpdatedAt']
accounts_report.sort(key=operator.itemgetter('updated'), reverse=True)
print(tabulate(accounts_report, headers=('keys')))
@cli.command()
@click.option('-c', '--config',
required=True, help="Accounts config file", type=click.Path())
@click.option('-t', '--tags', multiple=True, default=None)
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('--master', help='Master account id or name')
@click.option('--debug', help='Run single-threaded', is_flag=True)
@click.option('--suspend', help='Suspend monitoring in master', is_flag=True)
@click.option('--disable-detector', help='Disable detector in member account',
is_flag=True)
@click.option('--delete-detector', help='Delete detector in member account',
is_flag=True)
@click.option('--dissociate', help='Disassociate member account',
is_flag=True)
@click.option('--region')
def disable(config, tags, accounts, master, debug,
suspend, disable_detector, delete_detector, dissociate, region):
"""suspend guard duty in the given accounts."""
accounts_config, master_info, executor = guardian_init(
config, debug, master, accounts, tags)
if sum(map(int, (suspend, disable_detector, dissociate))) != 1:
raise ValueError((
"One and only of suspend, disable-detector, dissociate"
"can be specified."))
master_session = get_session(
master_info['role'], 'c7n-guardian',
master_info.get('profile'), region)
master_client = master_session.client('guardduty')
detector_id = get_or_create_detector_id(master_client)
if suspend:
unprocessed = master_client.stop_monitoring_members(
DetectorId=detector_id,
AccountIds=[a['account_id'] for a in accounts_config['accounts']]
).get('UnprocessedAccounts', ())
if unprocessed:
log.warning(
"Following accounts where unprocessed\n %s",
format_event(unprocessed))
log.info("Stopped monitoring %d accounts in master",
len(accounts_config['accounts']))
return
if dissociate:
master_client.disassociate_members(
DetectorId=detector_id,
AccountIds=[a['account_id'] for a in accounts_config['accounts']])
# Seems like there's a couple of ways to disable an account
# delete the detector (member), disable the detector (master or member),
# or disassociate members, or from member disassociate from master.
for a in accounts_config['accounts']:
member_session = get_session(
a['role'], 'c7n-guardian',
a.get('profile'), region)
member_client = member_session.client('guardduty')
m_detector_id = get_or_create_detector_id(member_client)
if disable_detector:
member_client.update_detector(
DetectorId=m_detector_id, Enable=False)
log.info("Disabled detector in account:%s", a['name'])
if dissociate:
try:
log.info("Disassociated member account:%s", a['name'])
result = member_client.disassociate_from_master_account(
DetectorId=m_detector_id)
log.info("Result %s", format_event(result))
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidInputException':
continue
if delete_detector:
member_client.delete_detector(DetectorId=m_detector_id)
log.info("Deleted detector in account:%s", a['name'])
def get_session(role, session_name, profile, region):
if role:
return assumed_session(role, session_name, region=region)
else:
return SessionFactory(region, profile)()
def expand_regions(regions, partition='aws'):
if 'all' in regions:
regions = boto3.Session().get_available_regions('ec2')
return regions
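# Illustrative note (not original code): expand_regions(['us-east-1'])
# returns the list unchanged, while expand_regions(['all']) expands to
# every region boto3 reports for the ec2 service.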
@cli.command()
@click.option('-c', '--config',
required=True, help="Accounts config file", type=click.Path())
@click.option('--master', help='Master account id or name')
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('-t', '--tags', multiple=True, default=None)
@click.option('--debug', help='Run single-threaded', is_flag=True)
@click.option('--message', help='Welcome Message for member accounts')
@click.option(
'-r', '--region',
default=['all'], help='Region to enable (default: all)',
multiple=True)
def enable(config, master, tags, accounts, debug, message, region):
"""enable guard duty on a set of accounts"""
accounts_config, master_info, executor = guardian_init(
config, debug, master, accounts, tags)
regions = expand_regions(region)
for r in regions:
log.info("Processing Region:%s", r)
enable_region(master_info, accounts_config, executor, message, r)
def enable_region(master_info, accounts_config, executor, message, region):
master_session = get_session(
master_info.get('role'), 'c7n-guardian',
master_info.get('profile'),
region=region)
master_client = master_session.client('guardduty')
detector_id = get_or_create_detector_id(master_client)
results = master_client.get_paginator(
'list_members').paginate(DetectorId=detector_id, OnlyAssociated="FALSE")
extant_members = results.build_full_result().get('Members', ())
extant_ids = {m['AccountId'] for m in extant_members}
# Find active members
active_ids = {m['AccountId'] for m in extant_members
if m['RelationshipStatus'] == 'Enabled'}
# Find invited members
invited_ids = {m['AccountId'] for m in extant_members
if m['RelationshipStatus'] == 'Invited'}
# Find extant members not currently enabled
suspended_ids = {m['AccountId'] for m in extant_members
if m['RelationshipStatus'] == 'Disabled'}
# Filter by accounts under consideration per config and cli flags
suspended_ids = {a['account_id'] for a in accounts_config['accounts']
if a['account_id'] in suspended_ids}
if suspended_ids:
unprocessed = master_client.start_monitoring_members(
DetectorId=detector_id,
AccountIds=list(suspended_ids)).get('UnprocessedAccounts')
if unprocessed:
log.warning(
"Region: %s Unprocessed accounts on re-start monitoring %s",
region, format_event(unprocessed))
log.info("Region: %s Restarted monitoring on %d accounts",
region, len(suspended_ids))
members = [{'AccountId': account['account_id'], 'Email': account['email']}
for account in accounts_config['accounts']
if account['account_id'] not in extant_ids]
if not members:
if not suspended_ids and not invited_ids:
log.info("Region:%s All accounts already enabled", region)
return list(active_ids)
if (len(members) + len(extant_ids)) > 1000:
raise ValueError(
("Region:%s Guard Duty only supports "
"1000 member accounts per master account") % (region))
log.info(
"Region:%s Enrolling %d accounts in guard duty", region, len(members))
unprocessed = []
for account_set in chunks(members, 25):
unprocessed.extend(master_client.create_members(
DetectorId=detector_id,
AccountDetails=account_set).get('UnprocessedAccounts', []))
if unprocessed:
log.warning(
"Region:%s accounts where unprocessed - member create\n %s",
region, format_event(unprocessed))
log.info("Region:%s Inviting %d member accounts", region, len(members))
unprocessed = []
for account_set in chunks(
[m for m in members if not m['AccountId'] in invited_ids], 25):
params = {'AccountIds': [m['AccountId'] for m in account_set],
'DetectorId': detector_id}
if message:
params['Message'] = message
unprocessed.extend(master_client.invite_members(
**params).get('UnprocessedAccounts', []))
if unprocessed:
log.warning(
"Region:%s accounts where unprocessed invite-members\n %s",
region, format_event(unprocessed))
members = [{'AccountId': account['account_id'], 'Email': account['email']}
for account in accounts_config['accounts']
if account['account_id'] not in active_ids]
log.info("Region:%s Accepting %d invitations in members", region, len(members))
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config['accounts']:
if a == master_info:
continue
if a['account_id'] in active_ids:
continue
futures[w.submit(enable_account, a, master_info['account_id'], region)] = a
for f in as_completed(futures):
a = futures[f]
if f.exception():
log.error("Region:%s Error processing account:%s error:%s",
region, a['name'], f.exception())
continue
if f.result():
log.info('Region:%s Enabled guard duty on account:%s',
region, a['name'])
return members
def enable_account(account, master_account_id, region):
member_session = get_session(
account.get('role'), 'c7n-guardian',
profile=account.get('profile'),
region=region)
member_client = member_session.client('guardduty')
m_detector_id = get_or_create_detector_id(member_client)
all_invitations = member_client.list_invitations().get('Invitations', [])
invitations = [
i for i in all_invitations
if i['AccountId'] == master_account_id]
invitations.sort(key=operator.itemgetter('InvitedAt'))
if not invitations:
log.warning(
"Region:%s No guard duty invitation found account:%s id:%s aid:%s",
region, account['name'], m_detector_id, account['account_id'])
return
member_client.accept_invitation(
DetectorId=m_detector_id,
InvitationId=invitations[-1]['InvitationId'],
MasterId=master_account_id)
return True
def get_or_create_detector_id(client):
detectors = client.list_detectors().get('DetectorIds')
if detectors:
return detectors[0]
else:
return client.create_detector(Enable=True).get('DetectorId')
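# Illustrative note (not original code): GuardDuty permits a single detector
# per account and region, so reusing the first listed detector id is safe,
# and create_detector only runs when none exists yet.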
def get_master_info(accounts_config, master):
master_info = None
for a in accounts_config['accounts']:
if a['name'] == master:
master_info = a
break
if a['account_id'] == master:
master_info = a
break
if master_info is None:
raise ValueError("Master account: %s not found in accounts config" % (
master))
return master_info
def guardian_init(config, debug, master, accounts, tags):
accounts_config, custodian_config, executor = init(
config, None, debug, False, None, None, None, None)
master_info = get_master_info(accounts_config, master)
filter_accounts(accounts_config, tags, accounts, not_accounts=[master_info['name']])
return accounts_config, master_info, executor
# AccountSet
#
# get master invitation
# get detectors
# delete detector
# disassociate from master
| apache-2.0 |
sameetb-cuelogic/edx-platform-test | common/test/acceptance/pages/lms/progress.py | 182 | 3827 | """
Student progress page
"""
from .course_page import CoursePage
class ProgressPage(CoursePage):
"""
Student progress page.
"""
url_path = "progress"
def is_browser_on_page(self):
is_present = (
self.q(css='div.course-info').present and
self.q(css='div#grade-detail-graph').present
)
return is_present
@property
def grading_formats(self):
return [label.replace(' Scores:', '') for label in self.q(css="div.scores h3").text]
def scores(self, chapter, section):
"""
Return a list of (points, max_points) tuples representing the scores
for the section.
Example:
scores('Week 1', 'Lesson 1') --> [(2, 4), (0, 1)]
Returns `None` if no such chapter and section can be found.
"""
# Find the index of the section in the chapter
chapter_index = self._chapter_index(chapter)
if chapter_index is None:
return None
section_index = self._section_index(chapter_index, section)
if section_index is None:
return None
# Retrieve the scores for the section
return self._section_scores(chapter_index, section_index)
def _chapter_index(self, title):
"""
Return the CSS index of the chapter with `title`.
Returns `None` if it cannot find such a chapter.
"""
chapter_css = 'div.chapters section h2'
chapter_titles = self.q(css=chapter_css).map(lambda el: el.text.lower().strip()).results
try:
# CSS indices are 1-indexed, so add one to the list index
return chapter_titles.index(title.lower()) + 1
except ValueError:
self.warning("Could not find chapter '{0}'".format(title))
return None
def _section_index(self, chapter_index, title):
"""
Return the CSS index of the section with `title` in the chapter at `chapter_index`.
Returns `None` if it can't find such a section.
"""
# This is a hideous CSS selector that means:
# Get the links containing the section titles in `chapter_index`.
# The link text is the section title.
section_css = 'div.chapters>section:nth-of-type({0}) div.sections div h3 a'.format(chapter_index)
section_titles = self.q(css=section_css).map(lambda el: el.text.lower().strip()).results
# The section titles also contain "n of m possible points" on the second line
# We have to remove this to find the right title
section_titles = [t.split('\n')[0] for t in section_titles]
# Some links are blank, so remove them
section_titles = [t for t in section_titles if t]
try:
# CSS indices are 1-indexed, so add one to the list index
return section_titles.index(title.lower()) + 1
except ValueError:
self.warning("Could not find section '{0}'".format(title))
return None
def _section_scores(self, chapter_index, section_index):
"""
Return a list of `(points, max_points)` tuples representing
the scores in the specified chapter and section.
`chapter_index` and `section_index` start at 1.
"""
# This CSS selector means:
# Get the scores for the chapter at `chapter_index` and the section at `section_index`
# Example text of the retrieved elements: "0/1"
score_css = "div.chapters>section:nth-of-type({0}) div.sections>div:nth-of-type({1}) div.scores>ol>li".format(
chapter_index, section_index
)
text_scores = self.q(css=score_css).text
# Convert text scores to tuples of (points, max_points)
return [tuple(map(int, score.split('/'))) for score in text_scores]
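# Illustrative note (not part of the original page object): a score string
# such as "2/4" parses to the tuple (2, 4) via the comprehension above.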
| agpl-3.0 |
ppwwyyxx/tensorflow | tensorflow/python/data/experimental/ops/stats_options.py | 20 | 2581 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StatsOptions to configure stats aggregation options for `tf.data` pipelines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.StatsOptions")
class StatsOptions(options.OptionsBase):
"""Represents options for collecting dataset stats using `StatsAggregator`.
You can set the stats options of a dataset through the `experimental_stats`
property of `tf.data.Options`; the property is an instance of
`tf.data.experimental.StatsOptions`. For example, to collect latency stats
on all dataset edges, use the following pattern:
```python
aggregator = tf.data.experimental.StatsAggregator()
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
options.experimental_stats.latency_all_edges = True
dataset = dataset.with_options(options)
```
"""
aggregator = options.create_option(
name="aggregator",
ty=(stats_aggregator.StatsAggregatorV2,
stats_aggregator.StatsAggregatorV1),
docstring=
"Associates the given statistics aggregator with the dataset pipeline.")
prefix = options.create_option(
name="prefix",
ty=str,
docstring=
"Prefix to prepend all statistics recorded for the input `dataset` with.",
default_factory=lambda: "")
counter_prefix = options.create_option(
name="counter_prefix",
ty=str,
docstring="Prefix for the statistics recorded as counter.",
default_factory=lambda: "")
latency_all_edges = options.create_option(
name="latency_all_edges",
ty=bool,
docstring=
"Whether to add latency measurements on all edges. Defaults to False.")
| apache-2.0 |
f0rki/cb-multios | original-challenges/Order_Up/poller/for-release/machine.py | 1 | 7445 | #!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
from random import choice, randint
from os.path import dirname, abspath, join
import sys
sys.path.append(join(dirname(dirname(dirname(abspath(__file__)))), "support"))
import support as sp
from common import DEBUG, CONFIG
from orderup import OrderUp
from table import Table, Customer, Order
def recv_status(status):
return sp.pack_single_string(status)
def recv_uint8(val):
return sp.pack_single_uint8(val)
def recv_uint16(val):
return sp.pack_single_uint16(val)
def recv_uint32(val):
return sp.pack_single_uint32(val)
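# Illustrative note (not original code): these helpers delegate to the
# support.pack_* routines to build the exact byte strings the poller
# expects, e.g. recv_status(OrderUpPoller.STATUS_OK) yields the two status
# bytes later matched via self.read(expect=...).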
class OrderUpPoller(Actions):
CMD_ADD_CUST = '\xEE\x01\xEE\x01'
CMD_GET_ORDERS = '\x01\xEE\x01\xEE'
CMD_ORDER_UP = '\x0E\x0E\x0E\x0E'
CMD_TABLE_STATUS = '\xBE\xBE\xBE\xBE'
CMD_QUIT = '\xB0\x01\xB0\x01'
CMD_BUS_TABLES = '\xBB\x11\x11\xBB'
STATUS_OK = '\x04\x04'
STATUS_ERR = '\x44\xEE'
STATUS_QUIT = '\x44\x44'
def send_cmd(self, cmd):
self.write(cmd)
def _make_next_table(self):
tid = self.state['next_tid']
self.state['next_tid'] += 1
## for testing
# seats = 5
## for release
seats = ord(self.magic_page[tid]) % CONFIG['MAX_TABLE_SIZE']
if CONFIG['MIN_TABLE_SIZE'] > seats:
seats += CONFIG['MIN_TABLE_SIZE']
return Table(tid=tid, seats=seats, customers=[])
def _load_tables(self):
for tid in range(CONFIG['TABLE_CNT']):
self.state['e'].add_table(self._make_next_table())
def start(self):
#self.delay(100)
self.state['next_tid'] = 1
self.state['e'] = OrderUp()
self._load_tables()
if DEBUG:
print "------- start -----------"
def add_customers(self):
'''
A new group of customers arrives at the restaurant.
'''
self.send_cmd(self.CMD_ADD_CUST)
if DEBUG:
print "cmd: add customer -----------"
new_customers = self.state['e'].get_next_arriving_customers()
if DEBUG:
print " {0} customers to be added.".format(len(new_customers))
# send customer count
write_data = sp.pack_single_uint8(len(new_customers))
# send all customers on new_customers list
for c in new_customers:
if DEBUG:
print " packing customer {0}".format(c.id)
write_data += sp.pack_single_uint32(c.id)
self.write(write_data)
# add all customers on new_customers list to waiting_list
# try to seat all customers on waiting_list at empty tables
seated_cnt = self.state['e'].seat_customers(new_customers)
# recv number of customers seated
if DEBUG:
print " {0} seated".format(seated_cnt)
recv_buf = recv_uint8(seated_cnt)
recv_buf += recv_status(self.STATUS_OK)
self.read(length=len(recv_buf), expect=recv_buf)
return 0
def table_status(self):
'''
Get the status of all tables.
'''
self.send_cmd(self.CMD_TABLE_STATUS)
if DEBUG:
print "cmd: table status -----------"
tss = self.state['e'].get_status_of_tables()
packed = ''
for t in tss:
packed += sp.pack_single_uint32(t)
if DEBUG:
print " status {0}".format(t)
recv_buf = packed
recv_buf += recv_status(self.STATUS_OK)
self.read(length=len(recv_buf), expect=recv_buf)
return 0
def get_orders(self):
'''
Get orders from tables with Customers that are ready to order.
'''
self.send_cmd(self.CMD_GET_ORDERS)
if DEBUG:
print "cmd: get orders -----------"
orders = self.state['e'].get_orders(self.magic_page)
# recv order count
recv_buf = ''
recv_buf += sp.pack_single_uint8(len(orders))
if DEBUG:
print " {0} orders: {1}".format(len(orders), orders)
if 0 < len(orders):
packed = self.state['e'].pack_orders(orders)
# recv orders
recv_buf += packed
recv_buf += recv_status(self.STATUS_OK)
self.read(length=len(recv_buf), expect=recv_buf)
return 0
def order_up(self):
'''
Deliver completed orders to the relevant Customers.
'''
self.send_cmd(self.CMD_ORDER_UP)
if DEBUG:
print "cmd: order up -----------"
orders_taken = self.state['e'].get_ready_orders(count=0) # 0 means all
order_count = len(orders_taken)
# send number of orders
write_buf = ''
write_buf += sp.pack_single_uint8(order_count)
# send orders
if 0 < order_count:
packed = self.state['e'].pack_orders(orders_taken)
write_buf += packed
self.write(write_buf)
recv_buf = recv_status(self.STATUS_OK)
self.read(length=len(recv_buf), expect=recv_buf)
return 0
def full_meal(self):
self.add_customers()
self.get_orders() # app
self.order_up()
self.get_orders() # meal
self.order_up()
self.get_orders() # des
self.order_up()
return 0
def bus_tables(self):
'''
For all tables where customers are finished, clear and reset the table.
'''
self.send_cmd(self.CMD_BUS_TABLES)
if DEBUG:
print "cmd: bus tables -----------"
bus_cnt = self.state['e'].bus_finished_tables()
packed = sp.pack_single_uint8(bus_cnt)
if DEBUG:
print " bus count {0}".format(bus_cnt)
recv_buf = packed
recv_buf += recv_status(self.STATUS_OK)
self.read(length=len(recv_buf), expect=recv_buf)
return 0
def quit(self):
'''
Quit cleanly
'''
self.send_cmd(self.CMD_QUIT)
if DEBUG:
print "cmd: quit -----------"
recv_buf = recv_status(self.STATUS_QUIT)
self.read(length=len(recv_buf), expect=recv_buf)
return -1
def broker(self):
'''
Branching node for all nodes
'''
return 0
| mit |
evaschalde/odoo | addons/crm/res_partner.py | 159 | 5149 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
""" Inherits partner and adds CRM information in the partner form """
_inherit = 'res.partner'
def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
# the user may not have access rights for opportunities or meetings
try:
for partner in self.browse(cr, uid, ids, context):
if partner.is_company:
operator = 'child_of'
else:
operator = '='
opp_ids = self.pool['crm.lead'].search(cr, uid, [('partner_id', operator, partner.id), ('type', '=', 'opportunity'), ('probability', '<', '100')], context=context)
res[partner.id] = {
'opportunity_count': len(opp_ids),
'meeting_count': len(partner.meeting_ids),
}
except Exception:
    # Swallow access-rights errors (see the note above); the counts from
    # the initial dict simply stay at 0.
    pass
return res
def _phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = len(partner.phonecall_ids)
return res
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
'Meetings'),
'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
'Phonecalls'),
'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
'phonecall_count': fields.function(_phonecall_count, string="Phonecalls", type="integer"),
}
def redirect_partner_form(self, cr, uid, partner_id, context=None):
search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
value = {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'context': context,
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False
}
return value
def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
categ_obj = self.pool.get('crm.case.categ')
categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
lead_obj = self.pool.get('crm.lead')
opportunity_ids = {}
for partner in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner.id
opportunity_id = lead_obj.create(cr, uid, {
'name' : opportunity_summary,
'planned_revenue' : planned_revenue,
'probability' : probability,
'partner_id' : partner_id,
'categ_ids' : categ_ids and categ_ids[0:1] or [],
'type': 'opportunity'
}, context=context)
opportunity_ids[partner_id] = opportunity_id
return opportunity_ids
def schedule_meeting(self, cr, uid, ids, context=None):
partner_ids = list(ids)
partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'search_default_partner_ids': list(ids),
'default_partner_ids': partner_ids,
}
return res
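# --- Added usage sketch (illustrative only, not part of the original
# module; the values below are made up) ---
# make_opportunity() creates one 'opportunity'-type crm.lead per partner
# in ids and returns a {partner_id: opportunity_id} mapping, e.g.:
#   self.pool.get('res.partner').make_opportunity(
#       cr, uid, [partner.id], 'Website inquiry', planned_revenue=500.0)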
| agpl-3.0 |
40223211/2015cd_midterm- | static/Brython3.1.1-20150328-091302/Lib/browser/object_storage.py | 627 | 1315 | import pickle
# Renamed from '__UnProvided' (fix): a double-underscore name is mangled
# to '_ObjectStorage__UnProvided' when referenced inside the ObjectStorage
# class body, which would raise NameError when the class is defined.
class _UnProvided():
    pass
class ObjectStorage():
def __init__(self, storage):
self.storage = storage
def __delitem__(self, key):
del self.storage[pickle.dumps(key)]
def __getitem__(self, key):
return pickle.loads(self.storage[pickle.dumps(key)])
def __setitem__(self, key, value):
self.storage[pickle.dumps(key)] = pickle.dumps(value)
def __contains__(self, key):
return pickle.dumps(key) in self.storage
def get(self, key, default=None):
    if pickle.dumps(key) in self.storage:
        # Unpickle on the way out, matching __getitem__ and pop (the
        # original returned the raw pickled bytes here).
        return pickle.loads(self.storage[pickle.dumps(key)])
    return default
def pop(self, key, default=_UnProvided()):
    if type(default) is _UnProvided or pickle.dumps(key) in self.storage:
return pickle.loads(self.storage.pop(pickle.dumps(key)))
return default
def __iter__(self):
keys = self.keys()
return keys.__iter__()
def keys(self):
return [pickle.loads(key) for key in self.storage.keys()]
def values(self):
return [pickle.loads(val) for val in self.storage.values()]
def items(self):
return list(zip(self.keys(), self.values()))
def clear(self):
self.storage.clear()
def __len__(self):
return len(self.storage)
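# --- Added usage sketch (not part of the original Brython module) ---
# ObjectStorage adapts a string-keyed store (such as the browser's
# localStorage wrapper) so that arbitrary picklable Python objects can be
# used as keys and values. A plain dict can stand in for the backend:
def _demo_object_storage():
    store = ObjectStorage({})
    store[('user', 42)] = {'name': 'ada'}
    assert ('user', 42) in store
    assert store.get(('user', 42)) == {'name': 'ada'}
    assert store.pop(('user', 42)) == {'name': 'ada'}
    return len(store)  # 0 after the pop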
| gpl-3.0 |
zozo123/buildbot | master/buildbot/test/unit/test_www_service.py | 1 | 4308 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
import os
from buildbot.test.fake import fakemaster
from buildbot.test.util import www
from buildbot.www import auth
from buildbot.www import resource
from buildbot.www import rest
from buildbot.www import service
from twisted.internet import defer
from twisted.trial import unittest
class NeedsReconfigResource(resource.Resource):
needsReconfig = True
reconfigs = 0
def reconfigResource(self, config):
NeedsReconfigResource.reconfigs += 1
class Test(www.WwwTestMixin, unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master()
self.svc = self.master.www = service.WWWService(self.master)
def makeConfig(self, **kwargs):
pwd = os.getcwd()
w = dict(url='h:/', port=None, public_html=pwd, auth=auth.NoAuth())
w.update(kwargs)
new_config = mock.Mock()
new_config.www = w
self.master.config = new_config
return new_config
def test_reconfigService_no_port(self):
new_config = self.makeConfig()
d = self.svc.reconfigService(new_config)
@d.addCallback
def check(_):
self.assertEqual(self.svc.site, None)
return d
@defer.inlineCallbacks
def test_reconfigService_reconfigResources(self):
new_config = self.makeConfig(port=8080)
self.patch(rest, 'RestRootResource', NeedsReconfigResource)
NeedsReconfigResource.reconfigs = 0
# first time, reconfigResource gets called along with setupSite
yield self.svc.reconfigService(new_config)
self.assertEqual(NeedsReconfigResource.reconfigs, 1)
# and the next time, setupSite isn't called, but reconfigResource is
yield self.svc.reconfigService(new_config)
self.assertEqual(NeedsReconfigResource.reconfigs, 2)
def test_reconfigService_port(self):
new_config = self.makeConfig(port=20)
d = self.svc.reconfigService(new_config)
@d.addCallback
def check(_):
self.assertNotEqual(self.svc.site, None)
self.assertNotEqual(self.svc.port_service, None)
self.assertEqual(self.svc.port, 20)
return d
def test_reconfigService_port_changes(self):
new_config = self.makeConfig(port=20)
d = self.svc.reconfigService(new_config)
@d.addCallback
def reconfig(_):
newer_config = self.makeConfig(port=999)
return self.svc.reconfigService(newer_config)
@d.addCallback
def check(_):
self.assertNotEqual(self.svc.site, None)
self.assertNotEqual(self.svc.port_service, None)
self.assertEqual(self.svc.port, 999)
return d
def test_reconfigService_port_changes_to_none(self):
new_config = self.makeConfig(port=20)
d = self.svc.reconfigService(new_config)
@d.addCallback
def reconfig(_):
newer_config = self.makeConfig()
return self.svc.reconfigService(newer_config)
@d.addCallback
def check(_):
# (note the site sticks around)
self.assertEqual(self.svc.port_service, None)
self.assertEqual(self.svc.port, None)
return d
def test_setupSite(self):
self.svc.setupSite(self.makeConfig())
site = self.svc.site
# check that it has the right kind of resources attached to its
# root
root = site.resource
req = mock.Mock()
self.assertIsInstance(root.getChildWithDefault('api', req),
rest.RestRootResource)
| gpl-3.0 |
Creworker/FreeCAD | src/Mod/Path/Init.py | 27 | 2043 | #***************************************************************************
#* (c) Yorik van Havre (yorik@uncreated.net) 2014 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************/
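# NOTE (added): this Init script runs inside FreeCAD's embedded Python
# interpreter, which injects the application object 'App' into the module
# namespace before executing it, so no explicit import is needed below.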
# Get the Parameter Group of this module
ParGrp = App.ParamGet("System parameter:Modules").GetGroup("Path")
# Set the needed information
ParGrp.SetString("HelpIndex", "Path/Help/index.html")
ParGrp.SetString("WorkBenchName", "Path")
ParGrp.SetString("WorkBenchModule", "PathWorkbench.py")
| lgpl-2.1 |
glancyea/wastepermitcontent | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py | 1283 | 65086 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
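# --- Added sketch (illustrative only) ---
# The returned dictionary is the expansion table later consumed by
# XcodeArchsDefault._ExpandArchs:
def _demo_archs_variable_mapping():
    mapping = XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64'])
    assert mapping == {
        '$(ARCHS_STANDARD)': ['i386'],
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)': ['i386', 'x86_64'],
    }
    return mapping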
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depend on the version of Xcode used.
Every version before Xcode 5.0, and every version from Xcode 5.1 onward,
uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
$(ARCHS_STANDARD_INCLUDING_64_BIT). That variable was added in Xcode 5.0
and deprecated in Xcode 5.1.
For a "macosx" SDKROOT, every version starting with Xcode 5.0 includes the
64-bit architecture as part of $(ARCHS_STANDARD) and defaults to building
only it. For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures
are part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0, and from
Xcode 5.1 they are also part of $(ARCHS_STANDARD).
All of these rules are encoded in the construction of the
|XcodeArchsDefault| object for the detected Xcode version; the object is
cached for performance."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
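# --- Added sketch (illustrative only) ---
# ActiveArchs first expands $(...)-style variables for the given SDKROOT,
# then filters by VALID_ARCHS. Using the pre-Xcode-5 table from above:
def _demo_active_archs():
    default = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['armv7']))
    # ARCHS unset on an iphoneos SDK falls back to the default and expands
    # to ['armv7']; VALID_ARCHS keeps it.
    assert default.ActiveArchs(None, ['armv7'], 'iphoneos') == ['armv7']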
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
# ('shared_library',) needs the trailing comma; a bare ('shared_library')
# is just a string, so the membership test was only a substring check.
if self.spec['type'] in ('shared_library',) or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
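# Example (added): a static_library target named 'libcrypto' has its
# 'lib' prefix stripped, re-added by _GetStandaloneExecutablePrefix(),
# and gets the '.a' suffix -> 'libcrypto.a'; an executable target
# 'hello' comes out unchanged as 'hello'.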
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
# In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
# llvm-gcc. It also requires a fairly recent libtool, and
# if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
# path to the libLTO.dylib that matches the used clang.
if self._Test('LLVM_LTO', 'YES', default='NO'):
cflags.append('-flto')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't map c++0x to c++11, so that c++0x can still be used with
# older clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
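# Example (added): plain normpath() would collapse
# '@executable_path/../Frameworks' down to 'Frameworks'; splitting off
# the '@'-prefix first preserves it:
#   _StandardizePath('@loader_path/../lib/./x.dylib')
#     -> '@loader_path/../lib/x.dylib'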
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
# extensions and provide loader and main function.
# These flags reflect the compilation options used by xcode to compile
# extensions.
ldflags.append('-lpkstart')
# XcodeVersion() returns a (version, build) tuple; compare the version
# string, not the tuple.
if XcodeVersion()[0] < '0900':
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
ldflags.append('-fapplication-extension')
ldflags.append('-Xlinker -rpath '
'-Xlinker @executable_path/../../Frameworks')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and self.spec['type'] == 'executable'):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
# Xcode 7 started shipping ".tbd" (text-based stub) files instead of
# ".dylib" without providing real support for them. What it does for
# "/usr/lib" libraries is emit "-L/usr/lib -lname", which depends on the
# library order and causes collisions when building Chrome.
#
# Instead, substitute ".dylib" with ".tbd" in the generated project when
# the following conditions are both true:
# - the library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
# - the ".dylib" file does not exist but a ".tbd" file does.
library = l.replace('$(SDKROOT)', sdk_root)
if l.startswith('$(SDKROOT)'):
basename, ext = os.path.splitext(library)
if ext == '.dylib' and not os.path.exists(library):
tbd_library = basename + '.tbd'
if os.path.exists(tbd_library):
library = tbd_library
return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
cache['DTSDKName'] = sdk_root
if xcode >= '0430':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductBuildVersion')
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['DTPlatformName'] = cache['DTSDKName']
if configname.endswith("iphoneos"):
cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductVersion')
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |compilable[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
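# --- Added sketch (illustrative only; the paths are made up) ---
# With GCC_PREFIX_HEADER=src/prefix.h and GCC_PRECOMPILE_PREFIX_HEADER=YES,
# GetPchBuildCommands() yields one precompile step per language, e.g.
#   ('obj/prefix.h-c.gch', '-x c-header', 'c', 'src/prefix.h')
# and GetInclude('c') then returns '-include obj/prefix.h-c', i.e. the
# compiled header path without its '.gch' suffix.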
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select.
# In that case this may be a CLT-only install, so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
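# --- Added sketch (illustrative only) ---
# The normalization above turns 'Xcode 4.6.3' into '0463' and 'Xcode 4.2'
# into '0420'. A standalone equivalent of just that step:
def _demo_normalize_xcode_version(version_line):
    version = version_line.split()[-1].replace('.', '')   # '4.2' -> '42'
    return (version + '0' * (3 - len(version))).zfill(4)  # '42' -> '0420'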
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
Raises |GypError| if the command return with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules not only produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
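# Illustrative sketch (not part of gyp): the destination mapping performed by
# the loop above, pulled out for clarity. E.g. with dest 'App.app/Contents/Resources',
# 'res/en.lproj/Main.xib' maps to 'App.app/Contents/Resources/en.lproj/Main.nib'
# and 'res/Icon.png' maps to 'App.app/Contents/Resources/Icon.png'.
def _ExampleBundleResourceDest(dest, res):
  output = dest
  res_parts = os.path.split(res)
  lproj_parts = os.path.split(res_parts[0])
  if lproj_parts[1].endswith('.lproj'):
    output = os.path.join(output, lproj_parts[1])
  output = os.path.join(output, res_parts[1])
  if output.endswith('.xib'):
    output = os.path.splitext(output)[0] + '.nib'
  elif output.endswith('.storyboard'):
    output = os.path.splitext(output)[0] + '.storyboardc'
  return output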
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
    gyp_path_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_FRAMEWORKS_DIR' : built_products_dir,
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
  if XcodeVersion()[0] >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
if sdk_root is None:
sdk_root = ''
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
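# Minimal usage sketch (illustrative only): all three accepted reference
# forms normalize to the braced form.
def _ExampleNormalize():
  assert (_NormalizeEnvVarReferences('$FOO $(BAR) ${BAZ}') ==
          '${FOO} ${BAR} ${BAZ}')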
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
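# Illustrative only: expansions are applied in reversed list order, so a value
# may reference any variable appearing *earlier* in the list -- exactly the
# order produced by GetSortedXcodeEnv below.
def _ExampleExpandEnvVars():
  expansions = [('B', 'b'), ('A', 'a-${B}')]
  return ExpandEnvVars('${A}', expansions)  # -> 'a-b'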
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
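# Illustrative only (hypothetical values): because DSTROOT refers to SRCROOT,
# SRCROOT must come first in the result.
def _ExampleTopologicalSort():
  env = {'DSTROOT': '${SRCROOT}/out', 'SRCROOT': '/src'}
  return _TopologicallySortedEnvVarKeys(env)  # -> ['SRCROOT', 'DSTROOT']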
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
| mit |
leiferikb/bitpop | build/third_party/twisted_10_2/twisted/words/iwords.py | 57 | 8545 | # -*- test-case-name: twisted.words.test -*-
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from zope.interface import Interface, Attribute, implements
class IProtocolPlugin(Interface):
"""Interface for plugins providing an interface to a Words service
"""
name = Attribute("A single word describing what kind of interface this is (eg, irc or web)")
def getFactory(realm, portal):
"""Retrieve a C{twisted.internet.interfaces.IServerFactory} provider
@param realm: An object providing C{twisted.cred.portal.IRealm} and
C{IChatService}, with which service information should be looked up.
@param portal: An object providing C{twisted.cred.portal.IPortal},
through which logins should be performed.
"""
class IGroup(Interface):
name = Attribute("A short string, unique among groups.")
def add(user):
"""Include the given user in this group.
@type user: L{IUser}
"""
def remove(user, reason=None):
"""Remove the given user from this group.
@type user: L{IUser}
@type reason: C{unicode}
"""
def size():
"""Return the number of participants in this group.
@rtype: L{twisted.internet.defer.Deferred}
        @return: A Deferred which fires with an C{int} representing the
number of participants in this group.
"""
def receive(sender, recipient, message):
"""
Broadcast the given message from the given sender to other
users in group.
The message is not re-transmitted to the sender.
@param sender: L{IUser}
@type recipient: L{IGroup}
@param recipient: This is probably a wart. Maybe it will be removed
in the future. For now, it should be the group object the message
is being delivered to.
@param message: C{dict}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with None when delivery has been
attempted for all users.
"""
def setMetadata(meta):
"""Change the metadata associated with this group.
@type meta: C{dict}
"""
def iterusers():
"""Return an iterator of all users in this group.
"""
class IChatClient(Interface):
"""Interface through which IChatService interacts with clients.
"""
name = Attribute("A short string, unique among users. This will be set by the L{IChatService} at login time.")
def receive(sender, recipient, message):
"""
Callback notifying this user of the given message sent by the
given user.
This will be invoked whenever another user sends a message to a
group this user is participating in, or whenever another user sends
a message directly to this user. In the former case, C{recipient}
will be the group to which the message was sent; in the latter, it
will be the same object as the user who is receiving the message.
@type sender: L{IUser}
@type recipient: L{IUser} or L{IGroup}
@type message: C{dict}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires when the message has been delivered,
or which fails in some way. If the Deferred fails and the message
was directed at a group, this user will be removed from that group.
"""
def groupMetaUpdate(group, meta):
"""
Callback notifying this user that the metadata for the given
group has changed.
@type group: L{IGroup}
@type meta: C{dict}
@rtype: L{twisted.internet.defer.Deferred}
"""
def userJoined(group, user):
"""
Callback notifying this user that the given user has joined
the given group.
@type group: L{IGroup}
@type user: L{IUser}
@rtype: L{twisted.internet.defer.Deferred}
"""
def userLeft(group, user, reason=None):
"""
Callback notifying this user that the given user has left the
given group for the given reason.
@type group: L{IGroup}
@type user: L{IUser}
@type reason: C{unicode}
@rtype: L{twisted.internet.defer.Deferred}
"""
class IUser(Interface):
"""Interface through which clients interact with IChatService.
"""
realm = Attribute("A reference to the Realm to which this user belongs. Set if and only if the user is logged in.")
mind = Attribute("A reference to the mind which logged in to this user. Set if and only if the user is logged in.")
name = Attribute("A short string, unique among users.")
lastMessage = Attribute("A POSIX timestamp indicating the time of the last message received from this user.")
signOn = Attribute("A POSIX timestamp indicating this user's most recent sign on time.")
def loggedIn(realm, mind):
"""Invoked by the associated L{IChatService} when login occurs.
@param realm: The L{IChatService} through which login is occurring.
@param mind: The mind object used for cred login.
"""
def send(recipient, message):
"""Send the given message to the given user or group.
@type recipient: Either L{IUser} or L{IGroup}
@type message: C{dict}
"""
def join(group):
"""Attempt to join the given group.
@type group: L{IGroup}
@rtype: L{twisted.internet.defer.Deferred}
"""
def leave(group):
"""Discontinue participation in the given group.
@type group: L{IGroup}
@rtype: L{twisted.internet.defer.Deferred}
"""
def itergroups():
"""
Return an iterator of all groups of which this user is a
member.
"""
class IChatService(Interface):
name = Attribute("A short string identifying this chat service (eg, a hostname)")
createGroupOnRequest = Attribute(
"A boolean indicating whether L{getGroup} should implicitly "
"create groups which are requested but which do not yet exist.")
createUserOnRequest = Attribute(
"A boolean indicating whether L{getUser} should implicitly "
"create users which are requested but which do not yet exist.")
def itergroups():
"""Return all groups available on this service.
@rtype: C{twisted.internet.defer.Deferred}
@return: A Deferred which fires with a list of C{IGroup} providers.
"""
def getGroup(name):
"""Retrieve the group by the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the group with the given
name if one exists (or if one is created due to the setting of
        L{createGroupOnRequest}), or which fails with
L{twisted.words.ewords.NoSuchGroup} if no such group exists.
"""
def createGroup(name):
"""Create a new group with the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the created group, or
        which fails with L{twisted.words.ewords.DuplicateGroup} if a
group by that name exists already.
"""
def lookupGroup(name):
"""Retrieve a group by name.
Unlike C{getGroup}, this will never implicitly create a group.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the group by the given
name, or which fails with L{twisted.words.ewords.NoSuchGroup}.
"""
def getUser(name):
"""Retrieve the user by the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the user with the given
name if one exists (or if one is created due to the setting of
        L{createUserOnRequest}), or which fails with
L{twisted.words.ewords.NoSuchUser} if no such user exists.
"""
def createUser(name):
"""Create a new user with the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the created user, or
        which fails with L{twisted.words.ewords.DuplicateUser} if a
user by that name exists already.
"""
__all__ = [
    'IProtocolPlugin', 'IGroup', 'IChatClient', 'IUser', 'IChatService',
]
| gpl-3.0 |
chirilo/remo | vendor-local/lib/python/docutils/math/latex2mathml.py | 6 | 20653 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# :Id: $Id: latex2mathml.py 7058 2011-06-27 11:38:56Z milde $
# :Copyright: © 2010 Günter Milde.
# Based on rst2mathml.py from the latex_math sandbox project
# © 2005 Jens Jørgen Mortensen
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""Convert LaTex math code into presentational MathML"""
# Based on the `latex_math` sandbox project by Jens Jørgen Mortensen
# LaTeX to MathML translation stuff:
class math:
"""Base class for MathML elements."""
nchildren = 1000000
"""Required number of children"""
def __init__(self, children=None, inline=None):
"""math([children]) -> MathML element
children can be one child or a list of children."""
self.children = []
if children is not None:
if type(children) is list:
for child in children:
self.append(child)
else:
# Only one child:
self.append(children)
if inline is not None:
self.inline = inline
def __repr__(self):
if hasattr(self, 'children'):
return self.__class__.__name__ + '(%s)' % \
','.join([repr(child) for child in self.children])
else:
return self.__class__.__name__
def full(self):
"""Room for more children?"""
return len(self.children) >= self.nchildren
def append(self, child):
"""append(child) -> element
Appends child and returns self if self is not full or first
non-full parent."""
assert not self.full()
self.children.append(child)
child.parent = self
node = self
while node.full():
node = node.parent
return node
def delete_child(self):
"""delete_child() -> child
Delete last child and return it."""
child = self.children[-1]
del self.children[-1]
return child
def close(self):
"""close() -> parent
Close element and return first non-full element."""
parent = self.parent
while parent.full():
parent = parent.parent
return parent
def xml(self):
"""xml() -> xml-string"""
return self.xml_start() + self.xml_body() + self.xml_end()
def xml_start(self):
if not hasattr(self, 'inline'):
return ['<%s>' % self.__class__.__name__]
xmlns = 'http://www.w3.org/1998/Math/MathML'
if self.inline:
return ['<math xmlns="%s">' % xmlns]
else:
return ['<math xmlns="%s" mode="display">' % xmlns]
def xml_end(self):
return ['</%s>' % self.__class__.__name__]
def xml_body(self):
xml = []
for child in self.children:
xml.extend(child.xml())
return xml
class mrow(math):
def xml_start(self):
return ['\n<%s>' % self.__class__.__name__]
class mtable(math):
def xml_start(self):
return ['\n<%s>' % self.__class__.__name__]
class mtr(mrow): pass
class mtd(mrow): pass
class mx(math):
"""Base class for mo, mi, and mn"""
nchildren = 0
def __init__(self, data):
self.data = data
def xml_body(self):
return [self.data]
class mo(mx):
translation = {'<': '<', '>': '>'}
def xml_body(self):
return [self.translation.get(self.data, self.data)]
class mi(mx): pass
class mn(mx): pass
class msub(math):
nchildren = 2
class msup(math):
nchildren = 2
class msqrt(math):
nchildren = 1
class mroot(math):
nchildren = 2
class mfrac(math):
nchildren = 2
class msubsup(math):
nchildren = 3
def __init__(self, children=None, reversed=False):
self.reversed = reversed
math.__init__(self, children)
def xml(self):
if self.reversed:
## self.children[1:3] = self.children[2:0:-1]
self.children[1:3] = [self.children[2], self.children[1]]
self.reversed = False
return math.xml(self)
class mfenced(math):
translation = {'\\{': '{', '\\langle': u'\u2329',
'\\}': '}', '\\rangle': u'\u232A',
'.': ''}
def __init__(self, par):
self.openpar = par
math.__init__(self)
def xml_start(self):
open = self.translation.get(self.openpar, self.openpar)
close = self.translation.get(self.closepar, self.closepar)
return ['<mfenced open="%s" close="%s">' % (open, close)]
class mspace(math):
nchildren = 0
class mstyle(math):
def __init__(self, children=None, nchildren=None, **kwargs):
if nchildren is not None:
self.nchildren = nchildren
math.__init__(self, children)
self.attrs = kwargs
def xml_start(self):
return ['<mstyle '] + ['%s="%s"' % item
for item in self.attrs.items()] + ['>']
class mover(math):
nchildren = 2
def __init__(self, children=None, reversed=False):
self.reversed = reversed
math.__init__(self, children)
def xml(self):
if self.reversed:
self.children.reverse()
self.reversed = False
return math.xml(self)
class munder(math):
nchildren = 2
class munderover(math):
nchildren = 3
def __init__(self, children=None):
math.__init__(self, children)
class mtext(math):
nchildren = 0
def __init__(self, text):
self.text = text
def xml_body(self):
return [self.text]
# Accent name -> TeX spacing character; the corresponding Unicode combining
# character is noted in each entry's trailing comment.
over = {'acute': u'\u00B4', # u'\u0301',
'bar': u'\u00AF', # u'\u0304',
'breve': u'\u02D8', # u'\u0306',
'check': u'\u02C7', # u'\u030C',
'dot': u'\u02D9', # u'\u0307',
'ddot': u'\u00A8', # u'\u0308',
'dddot': u'\u20DB',
'grave': u'`', # u'\u0300',
'hat': u'^', # u'\u0302',
'tilde': u'\u02DC', # u'\u0303',
# 'overline': # u'\u0305',
'vec': u'\u20D7'}
Greek = { # Upper case greek letters:
'Phi':u'\u03a6', 'Xi':u'\u039e', 'Sigma':u'\u03a3',
'Psi':u'\u03a8', 'Delta':u'\u0394', 'Theta':u'\u0398',
'Upsilon':u'\u03d2', 'Pi':u'\u03a0', 'Omega':u'\u03a9',
'Gamma':u'\u0393', 'Lambda':u'\u039b'}
letters = { # Lower case greek letters (and dotless i, j):
# 'imath':u'i', 'jmath':u'i', # when used with combining accents
'imath':u'\u0131', 'jmath':u'\u0237',
'tau':u'\u03c4', 'phi':u'\u03d5', 'xi':u'\u03be', 'iota':u'\u03b9',
'epsilon':u'\u03f5', 'varrho':u'\u03f1', 'varsigma':u'\u03c2',
'beta':u'\u03b2', 'psi':u'\u03c8', 'rho':u'\u03c1',
'delta':u'\u03b4', 'alpha':u'\u03b1', 'zeta':u'\u03b6',
'omega':u'\u03c9', 'varepsilon':u'\u03b5', 'kappa':u'\u03ba',
'vartheta':u'\u03d1', 'chi':u'\u03c7', 'upsilon':u'\u03c5',
'sigma':u'\u03c3', 'varphi':u'\u03c6', 'varpi':u'\u03d6',
'mu':u'\u03bc', 'eta':u'\u03b7', 'theta':u'\u03b8', 'pi':u'\u03c0',
'varkappa':u'\u03f0', 'nu':u'\u03bd', 'gamma':u'\u03b3',
'lambda':u'\u03bb'}
special = {
# Binary operation symbols:
'wedge':u'\u2227', 'diamond':u'\u22c4', 'star':u'\u22c6',
'amalg':u'\u2a3f', 'ast':u'\u2217', 'odot':u'\u2299',
'triangleleft':u'\u25c1', 'bigtriangleup':u'\u25b3',
'ominus':u'\u2296', 'ddagger':u'\u2021', 'wr':u'\u2240',
'otimes':u'\u2297', 'sqcup':u'\u2294', 'oplus':u'\u2295',
'bigcirc':u'\u25cb', 'oslash':u'\u2298', 'sqcap':u'\u2293',
'bullet':u'\u2219', 'cup':u'\u222a', 'cdot':u'\u22c5',
'cap':u'\u2229', 'bigtriangledown':u'\u25bd', 'times':u'\xd7',
'setminus':u'\u2216', 'circ':u'\u2218', 'vee':u'\u2228',
'uplus':u'\u228e', 'mp':u'\u2213', 'dagger':u'\u2020',
'triangleright':u'\u25b7', 'div':u'\xf7', 'pm':u'\xb1',
# Relation symbols:
'subset':u'\u2282', 'propto':u'\u221d', 'geq':u'\u2265',
'ge':u'\u2265', 'sqsubset':u'\u228f', 'Join':u'\u2a1d',
'frown':u'\u2322', 'models':u'\u22a7', 'supset':u'\u2283',
'in':u'\u2208', 'doteq':u'\u2250', 'dashv':u'\u22a3',
'gg':u'\u226b', 'leq':u'\u2264', 'succ':u'\u227b',
'vdash':u'\u22a2', 'cong':u'\u2245', 'simeq':u'\u2243',
'subseteq':u'\u2286', 'parallel':u'\u2225', 'equiv':u'\u2261',
'ni':u'\u220b', 'le':u'\u2264', 'approx':u'\u2248',
'precsim':u'\u227e', 'sqsupset':u'\u2290', 'll':u'\u226a',
'sqsupseteq':u'\u2292', 'mid':u'\u2223', 'prec':u'\u227a',
'succsim':u'\u227f', 'bowtie':u'\u22c8', 'perp':u'\u27c2',
'sqsubseteq':u'\u2291', 'asymp':u'\u224d', 'smile':u'\u2323',
'supseteq':u'\u2287', 'sim':u'\u223c', 'neq':u'\u2260',
# Arrow symbols:
'searrow':u'\u2198', 'updownarrow':u'\u2195', 'Uparrow':u'\u21d1',
'longleftrightarrow':u'\u27f7', 'Leftarrow':u'\u21d0',
'longmapsto':u'\u27fc', 'Longleftarrow':u'\u27f8',
'nearrow':u'\u2197', 'hookleftarrow':u'\u21a9',
'downarrow':u'\u2193', 'Leftrightarrow':u'\u21d4',
'longrightarrow':u'\u27f6', 'rightharpoondown':u'\u21c1',
'longleftarrow':u'\u27f5', 'rightarrow':u'\u2192',
'Updownarrow':u'\u21d5', 'rightharpoonup':u'\u21c0',
'Longleftrightarrow':u'\u27fa', 'leftarrow':u'\u2190',
'mapsto':u'\u21a6', 'nwarrow':u'\u2196', 'uparrow':u'\u2191',
'leftharpoonup':u'\u21bc', 'leftharpoondown':u'\u21bd',
'Downarrow':u'\u21d3', 'leftrightarrow':u'\u2194',
'Longrightarrow':u'\u27f9', 'swarrow':u'\u2199',
'hookrightarrow':u'\u21aa', 'Rightarrow':u'\u21d2',
'to':u'\u2192',
# Miscellaneous symbols:
'infty':u'\u221e', 'surd':u'\u221a',
'partial':u'\u2202', 'ddots':u'\u22f1', 'exists':u'\u2203',
'flat':u'\u266d', 'diamondsuit':u'\u2662', 'wp':u'\u2118',
'spadesuit':u'\u2660', 'Re':u'\u211c', 'vdots':u'\u22ee',
'aleph':u'\u2135', 'clubsuit':u'\u2663', 'sharp':u'\u266f',
'angle':u'\u2220', 'prime':u'\u2032', 'natural':u'\u266e',
'ell':u'\u2113', 'neg':u'\xac', 'top':u'\u22a4', 'nabla':u'\u2207',
'bot':u'\u22a5', 'heartsuit':u'\u2661', 'cdots':u'\u22ef',
'Im':u'\u2111', 'forall':u'\u2200',
'hbar':u'\u210f', 'emptyset':u'\u2205',
# Variable-sized symbols:
'bigotimes':u'\u2a02', 'coprod':u'\u2210', 'int':u'\u222b',
'sum':u'\u2211', 'bigodot':u'\u2a00', 'bigcup':u'\u22c3',
'biguplus':u'\u2a04', 'bigcap':u'\u22c2', 'bigoplus':u'\u2a01',
'oint':u'\u222e', 'bigvee':u'\u22c1', 'bigwedge':u'\u22c0',
'prod':u'\u220f',
# Braces:
'langle':u'\u2329', 'rangle':u'\u232A'}
sumintprod = ''.join([special[symbol] for symbol in
['sum', 'int', 'oint', 'prod']])
functions = ['arccos', 'arcsin', 'arctan', 'arg', 'cos', 'cosh',
'cot', 'coth', 'csc', 'deg', 'det', 'dim',
'exp', 'gcd', 'hom', 'inf', 'ker', 'lg',
'lim', 'liminf', 'limsup', 'ln', 'log', 'max',
'min', 'Pr', 'sec', 'sin', 'sinh', 'sup',
'tan', 'tanh',
'injlim', 'varinjlim', 'varlimsup',
'projlim', 'varliminf', 'varprojlim']
def parse_latex_math(string, inline=True):
"""parse_latex_math(string [,inline]) -> MathML-tree
Returns a MathML-tree parsed from string. inline=True is for
inline math and inline=False is for displayed math.
tree is the whole tree and node is the current element."""
# Normalize white-space:
string = ' '.join(string.split())
if inline:
node = mrow()
tree = math(node, inline=True)
else:
node = mtd()
tree = math(mtable(mtr(node)), inline=False)
while len(string) > 0:
n = len(string)
c = string[0]
skip = 1 # number of characters consumed
if n > 1:
c2 = string[1]
else:
c2 = ''
## print n, string, c, c2, node.__class__.__name__
if c == ' ':
pass
elif c == '\\':
if c2 in '{}':
node = node.append(mo(c2))
skip = 2
elif c2 == ' ':
node = node.append(mspace())
skip = 2
elif c2 == ',': # TODO: small space
node = node.append(mspace())
skip = 2
elif c2.isalpha():
# We have a LaTeX-name:
i = 2
while i < n and string[i].isalpha():
i += 1
name = string[1:i]
node, skip = handle_keyword(name, node, string[i:])
skip += i
elif c2 == '\\':
# End of a row:
entry = mtd()
row = mtr(entry)
node.close().close().append(row)
node = entry
skip = 2
else:
raise SyntaxError(ur'Syntax error: "%s%s"' % (c, c2))
elif c.isalpha():
node = node.append(mi(c))
elif c.isdigit():
node = node.append(mn(c))
elif c in "+-*/=()[]|<>,.!?':;@":
node = node.append(mo(c))
elif c == '_':
child = node.delete_child()
if isinstance(child, msup):
sub = msubsup(child.children, reversed=True)
elif isinstance(child, mo) and child.data in sumintprod:
sub = munder(child)
else:
sub = msub(child)
node.append(sub)
node = sub
elif c == '^':
child = node.delete_child()
if isinstance(child, msub):
sup = msubsup(child.children)
elif isinstance(child, mo) and child.data in sumintprod:
sup = mover(child)
elif (isinstance(child, munder) and
child.children[0].data in sumintprod):
sup = munderover(child.children)
else:
sup = msup(child)
node.append(sup)
node = sup
elif c == '{':
row = mrow()
node.append(row)
node = row
elif c == '}':
node = node.close()
elif c == '&':
entry = mtd()
node.close().append(entry)
node = entry
else:
raise SyntaxError(ur'Illegal character: "%s"' % c)
string = string[skip:]
return tree
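# Illustrative usage sketch: parse and serialize a small formula.
def _example_parse_fraction():
    tree = parse_latex_math(r'\frac{a}{b}', inline=True)
    # Returns the serialized <math> element wrapping an <mfrac> whose two
    # children are <mrow><mi>a</mi></mrow> and <mrow><mi>b</mi></mrow>.
    return ''.join(tree.xml())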
mathbb = {
'A': u'\U0001D538',
'B': u'\U0001D539',
'C': u'\u2102',
'D': u'\U0001D53B',
'E': u'\U0001D53C',
'F': u'\U0001D53D',
'G': u'\U0001D53E',
'H': u'\u210D',
'I': u'\U0001D540',
'J': u'\U0001D541',
'K': u'\U0001D542',
'L': u'\U0001D543',
'M': u'\U0001D544',
'N': u'\u2115',
'O': u'\U0001D546',
'P': u'\u2119',
'Q': u'\u211A',
'R': u'\u211D',
'S': u'\U0001D54A',
'T': u'\U0001D54B',
'U': u'\U0001D54C',
'V': u'\U0001D54D',
'W': u'\U0001D54E',
'X': u'\U0001D54F',
'Y': u'\U0001D550',
'Z': u'\u2124',
}
mathscr = {
'A': u'\U0001D49C',
'B': u'\u212C', # bernoulli function
'C': u'\U0001D49E',
'D': u'\U0001D49F',
'E': u'\u2130',
'F': u'\u2131',
'G': u'\U0001D4A2',
'H': u'\u210B', # hamiltonian
'I': u'\u2110',
'J': u'\U0001D4A5',
'K': u'\U0001D4A6',
'L': u'\u2112', # lagrangian
'M': u'\u2133', # physics m-matrix
'N': u'\U0001D4A9',
'O': u'\U0001D4AA',
'P': u'\U0001D4AB',
'Q': u'\U0001D4AC',
'R': u'\u211B',
'S': u'\U0001D4AE',
'T': u'\U0001D4AF',
'U': u'\U0001D4B0',
'V': u'\U0001D4B1',
'W': u'\U0001D4B2',
'X': u'\U0001D4B3',
'Y': u'\U0001D4B4',
'Z': u'\U0001D4B5',
'a': u'\U0001D4B6',
'b': u'\U0001D4B7',
'c': u'\U0001D4B8',
'd': u'\U0001D4B9',
'e': u'\u212F',
'f': u'\U0001D4BB',
'g': u'\u210A',
'h': u'\U0001D4BD',
'i': u'\U0001D4BE',
'j': u'\U0001D4BF',
'k': u'\U0001D4C0',
'l': u'\U0001D4C1',
'm': u'\U0001D4C2',
'n': u'\U0001D4C3',
'o': u'\u2134', # order of
'p': u'\U0001D4C5',
'q': u'\U0001D4C6',
'r': u'\U0001D4C7',
's': u'\U0001D4C8',
't': u'\U0001D4C9',
'u': u'\U0001D4CA',
'v': u'\U0001D4CB',
'w': u'\U0001D4CC',
'x': u'\U0001D4CD',
'y': u'\U0001D4CE',
'z': u'\U0001D4CF',
}
negatables = {'=': u'\u2260',
'\in': u'\u2209',
'\equiv': u'\u2262'}
def handle_keyword(name, node, string):
skip = 0
if len(string) > 0 and string[0] == ' ':
string = string[1:]
skip = 1
if name == 'begin':
if not string.startswith('{matrix}'):
raise SyntaxError(u'Environment not supported! '
u'Supported environment: "matrix".')
skip += 8
entry = mtd()
table = mtable(mtr(entry))
node.append(table)
node = entry
elif name == 'end':
if not string.startswith('{matrix}'):
raise SyntaxError(ur'Expected "\end{matrix}"!')
skip += 8
node = node.close().close().close()
elif name in ('text', 'mathrm'):
if string[0] != '{':
raise SyntaxError(ur'Expected "\text{...}"!')
i = string.find('}')
if i == -1:
raise SyntaxError(ur'Expected "\text{...}"!')
node = node.append(mtext(string[1:i]))
skip += i + 1
elif name == 'sqrt':
sqrt = msqrt()
node.append(sqrt)
node = sqrt
elif name == 'frac':
frac = mfrac()
node.append(frac)
node = frac
elif name == 'left':
for par in ['(', '[', '|', '\\{', '\\langle', '.']:
if string.startswith(par):
break
else:
raise SyntaxError(u'Missing left-brace!')
fenced = mfenced(par)
node.append(fenced)
row = mrow()
fenced.append(row)
node = row
skip += len(par)
elif name == 'right':
for par in [')', ']', '|', '\\}', '\\rangle', '.']:
if string.startswith(par):
break
else:
raise SyntaxError(u'Missing right-brace!')
node = node.close()
node.closepar = par
node = node.close()
skip += len(par)
elif name == 'not':
for operator in negatables:
if string.startswith(operator):
break
else:
raise SyntaxError(ur'Expected something to negate: "\not ..."!')
node = node.append(mo(negatables[operator]))
skip += len(operator)
elif name == 'mathbf':
style = mstyle(nchildren=1, fontweight='bold')
node.append(style)
node = style
elif name == 'mathbb':
if string[0] != '{' or not string[1].isupper() or string[2] != '}':
raise SyntaxError(ur'Expected something like "\mathbb{A}"!')
node = node.append(mi(mathbb[string[1]]))
skip += 3
elif name in ('mathscr', 'mathcal'):
if string[0] != '{' or string[2] != '}':
raise SyntaxError(ur'Expected something like "\mathscr{A}"!')
node = node.append(mi(mathscr[string[1]]))
skip += 3
elif name == 'colon': # "normal" colon, not binary operator
node = node.append(mo(':')) # TODO: add ``lspace="0pt"``
elif name in letters:
node = node.append(mi(letters[name]))
elif name in Greek:
node = node.append(mo(Greek[name]))
elif name in special:
node = node.append(mo(special[name]))
elif name in functions:
node = node.append(mo(name))
else:
chr = over.get(name)
if chr is not None:
ovr = mover(mo(chr), reversed=True)
node.append(ovr)
node = ovr
else:
raise SyntaxError(u'Unknown LaTeX command: ' + name)
return node, skip
| bsd-3-clause |
anirudhSK/chromium | native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/__init__.py | 155 | 14277 | #!/usr/bin/env python
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
# Foundation; All Rights Reserved
"""A HTTPSConnection/Handler with additional proxy and cert validation features.
In particular, monkey patches in Python r74203 to provide support for CONNECT
proxies and adds SSL cert validation if the ssl module is present.
"""
__author__ = "{frew,nick.johnson}@google.com (Fred Wulff and Nick Johnson)"
import base64
import httplib
import logging
import re
import socket
import urllib2
from urllib import splittype
from urllib import splituser
from urllib import splitpasswd
class InvalidCertificateException(httplib.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""
def __init__(self, host, cert, reason):
"""Constructor.
Args:
host: The hostname the connection was made to.
      cert: The SSL certificate (as a dictionary) the host returned.
      reason: Why the certificate failed validation.
    """
"""
httplib.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s): %s\n'
'To learn more, see '
'http://code.google.com/appengine/kb/general.html#rpcssl' %
(self.host, self.reason, self.cert))
def can_validate_certs():
"""Return True if we have the SSL package and can validate certificates."""
try:
import ssl
return True
except ImportError:
return False
def _create_fancy_connection(tunnel_host=None, key_file=None,
cert_file=None, ca_certs=None):
# This abomination brought to you by the fact that
# the HTTPHandler creates the connection instance in the middle
# of do_open so we need to add the tunnel host to the class.
class PresetProxyHTTPSConnection(httplib.HTTPSConnection):
"""An HTTPS connection that uses a proxy defined by the enclosing scope."""
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self._tunnel_host = tunnel_host
if tunnel_host:
logging.debug("Creating preset proxy https conn: %s", tunnel_host)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
try:
import ssl
if self.ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
except ImportError:
pass
def _tunnel(self):
self._set_hostport(self._tunnel_host, None)
logging.info("Connecting through tunnel to: %s:%d",
self.host, self.port)
self.send("CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.host, self.port))
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
(_, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error, "Tunnel connection failed: %d %s" % (
code, message.strip())
while True:
line = response.fp.readline()
if line == "\r\n":
break
def _get_valid_hosts_for_cert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
else:
# Return a list of commonName fields
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _validate_certificate_hostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._get_valid_hosts_for_cert(cert)
for host in hosts:
# Convert the glob-style hostname expression (eg, '*.google.com') into a
# valid regular expression.
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
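    # Illustrative only: a cert of
    #   {'subject': ((('commonName', '*.example.com'),),)}
    # yields the glob '*.example.com', compiled to '[^.]*\.example\.com', so
    # 'www.example.com' validates while 'a.b.example.com' does not.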
def connect(self):
# TODO(frew): When we drop support for <2.6 (in the far distant future),
# change this to socket.create_connection.
self.sock = _create_connection((self.host, self.port))
if self._tunnel_host:
self._tunnel()
# ssl and FakeSocket got deprecated. Try for the new hotness of wrap_ssl,
# with fallback.
try:
import ssl
self.sock = ssl.wrap_socket(self.sock,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
cert_reqs=self.cert_reqs)
if self.cert_reqs & ssl.CERT_REQUIRED:
cert = self.sock.getpeercert()
          hostname = self.host.split(':', 1)[0]  # strip any port number
if not self._validate_certificate_hostname(cert, hostname):
raise InvalidCertificateException(hostname, cert,
'hostname mismatch')
except ImportError:
ssl = socket.ssl(self.sock,
keyfile=self.key_file,
certfile=self.cert_file)
self.sock = httplib.FakeSocket(self.sock, ssl)
return PresetProxyHTTPSConnection
# Here to end of _create_connection copied wholesale from Python 2.6's socket.py
_GLOBAL_DEFAULT_TIMEOUT = object()
def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise socket.error, msg
class FancyRequest(urllib2.Request):
"""A request that allows the use of a CONNECT proxy."""
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self._tunnel_host = None
self._key_file = None
self._cert_file = None
self._ca_certs = None
def set_proxy(self, host, type):
saved_type = None
if self.get_type() == "https" and not self._tunnel_host:
self._tunnel_host = self.get_host()
saved_type = self.get_type()
urllib2.Request.set_proxy(self, host, type)
if saved_type:
# Don't set self.type, we want to preserve the
# type for tunneling.
self.type = saved_type
def set_ssl_info(self, key_file=None, cert_file=None, ca_certs=None):
self._key_file = key_file
self._cert_file = cert_file
self._ca_certs = ca_certs
class FancyProxyHandler(urllib2.ProxyHandler):
"""A ProxyHandler that works with CONNECT-enabled proxies."""
# Taken verbatim from /usr/lib/python2.5/urllib2.py
def _parse_proxy(self, proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
      # We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.
# and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
def proxy_open(self, req, proxy, type):
# This block is copied wholesale from Python2.6 urllib2.
# It is idempotent, so the superclass method call executes as normal
# if invoked.
orig_type = req.get_type()
proxy_type, user, password, hostport = self._parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if user and password:
user_pass = "%s:%s" % (urllib2.unquote(user), urllib2.unquote(password))
creds = base64.b64encode(user_pass).strip()
# Later calls overwrite earlier calls for the same header
req.add_header("Proxy-authorization", "Basic " + creds)
hostport = urllib2.unquote(hostport)
req.set_proxy(hostport, proxy_type)
# This condition is the change
if orig_type == "https":
return None
return urllib2.ProxyHandler.proxy_open(self, req, proxy, type)
class FancyHTTPSHandler(urllib2.HTTPSHandler):
"""An HTTPSHandler that works with CONNECT-enabled proxies."""
def do_open(self, http_class, req):
# Intentionally very specific so as to opt for false negatives
# rather than false positives.
try:
return urllib2.HTTPSHandler.do_open(
self,
_create_fancy_connection(req._tunnel_host,
req._key_file,
req._cert_file,
req._ca_certs),
req)
except urllib2.URLError, url_error:
try:
import ssl
if (type(url_error.reason) == ssl.SSLError and
url_error.reason.args[0] == 1):
# Display the reason to the user. Need to use args for python2.5
# compat.
raise InvalidCertificateException(req.host, '',
url_error.reason.args[1])
except ImportError:
pass
raise url_error
# We have to implement this so that we persist the tunneling behavior
# through redirects.
class FancyRedirectHandler(urllib2.HTTPRedirectHandler):
"""A redirect handler that persists CONNECT-enabled proxy information."""
def redirect_request(self, req, *args, **kwargs):
new_req = urllib2.HTTPRedirectHandler.redirect_request(
self, req, *args, **kwargs)
# Same thing as in our set_proxy implementation, but in this case
# we"ve only got a Request to work with, so it was this or copy
# everything over piecemeal.
#
# Note that we do not persist tunneling behavior from an http request
# to an https request, because an http request does not set _tunnel_host.
#
# Also note that in Python < 2.6, you will get an error in
# FancyHTTPSHandler.do_open() on an https urllib2.Request that uses an http
# proxy, since the proxy type will be set to http instead of https.
# (FancyRequest, and urllib2.Request in Python >= 2.6 set the proxy type to
# https.) Such an urllib2.Request could result from this redirect
# if you are redirecting from an http request (since an an http request
# does not have _tunnel_host set, and thus you will not set the proxy
# in the code below), and if you have defined a proxy for https in, say,
# FancyProxyHandler, and that proxy has type http.
if hasattr(req, "_tunnel_host") and isinstance(new_req, urllib2.Request):
if new_req.get_type() == "https":
if req._tunnel_host:
# req is proxied, so copy the proxy info.
new_req._tunnel_host = new_req.get_host()
new_req.set_proxy(req.host, "https")
else:
# req is not proxied, so just make sure _tunnel_host is defined.
new_req._tunnel_host = None
new_req.type = "https"
if hasattr(req, "_key_file") and isinstance(new_req, urllib2.Request):
# Copy the auxiliary data in case this or any further redirect is https
new_req._key_file = req._key_file
new_req._cert_file = req._cert_file
new_req._ca_certs = req._ca_certs
return new_req
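# Illustrative usage sketch: wiring the pieces together. The proxy URL and
# certificate path below are hypothetical.
#
#   opener = urllib2.build_opener(
#       FancyProxyHandler({'https': 'http://proxy.example.com:3128'}),
#       FancyHTTPSHandler(),
#       FancyRedirectHandler())
#   request = FancyRequest('https://www.example.com/')
#   request.set_ssl_info(ca_certs='/path/to/cacerts.pem')
#   response = opener.open(request)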
| bsd-3-clause |
dhoffman34/django | django/contrib/gis/tests/layermap/models.py | 75 | 2275 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=25)
objects = models.GeoManager()
class Meta:
abstract = True
app_label = 'layermap'
def __str__(self):
return self.name
class State(NamedModel):
pass
class County(NamedModel):
state = models.ForeignKey(State)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
class CountyFeat(NamedModel):
poly = models.PolygonField(srid=4269)
class City(NamedModel):
name_txt = models.TextField(default='')
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
class Interstate(NamedModel):
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
# Same as `City` above, but for testing model inheritance.
class CityBase(NamedModel):
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
class ICity1(CityBase):
dt = models.DateField()
class Meta(CityBase.Meta):
pass
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
class Meta(ICity1.Meta):
pass
class Invalid(models.Model):
point = models.PointField()
class Meta:
app_label = 'layermap'
# Mapping dictionaries for the models above.
co_mapping = {'name': 'Name',
'state': {'name': 'State'}, # ForeignKey's use another mapping dictionary for the _related_ Model (State in this case).
'mpoly': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name': 'Name',
'poly': 'POLYGON',
}
city_mapping = {'name': 'Name',
'population': 'Population',
'density': 'Density',
'dt': 'Created',
'point': 'POINT',
}
inter_mapping = {'name': 'Name',
'length': 'Length',
'path': 'LINESTRING',
}
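# Illustrative usage sketch (not part of the test data): these dictionaries
# are consumed by LayerMapping; the shapefile path below is hypothetical.
#
# from django.contrib.gis.utils import LayerMapping
# lm = LayerMapping(City, '/path/to/cities.shp', city_mapping)
# lm.save(verbose=True)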
| bsd-3-clause |
fibbo/DIRAC | WorkloadManagementSystem/PilotAgent/dirac-pilot.py | 9 | 2136 | #!/usr/bin/env python
""" The dirac-pilot.py script is a steering script to execute a series of
pilot commands. The commands may be provided in the pilot input sandbox, and are coded in
the pilotCommands.py module or in any <EXTENSION>Commands.py module.
The pilot script defines two switches in order to choose a set of commands for the pilot:
-E, --commandExtensions value
where the value is a comma separated list of extension names. Modules
with names <EXTENSION>Commands.py will be searched for the commands in
the order defined in the value. By default no extensions are given
-X, --commands value
where value is a comma separated list of pilot commands. By default
the list is InstallDIRAC,ConfigureDIRAC,LaunchAgent
The pilot script by default performs initial sanity checks on WN, installs and configures
DIRAC and runs the Job Agent to execute pending workloads in the DIRAC WMS.
But, as said, all the actions are actually configurable.
"""
__RCSID__ = "$Id$"
import os
import getopt
import sys
from types import ListType
from pilotTools import Logger, pythonPathCheck, PilotParams, getCommand
if __name__ == "__main__":
pythonPathCheck()
log = Logger( 'Pilot' )
pilotParams = PilotParams()
if pilotParams.debugFlag:
log.setDebug()
pilotParams.pilotRootPath = os.getcwd()
pilotParams.pilotScript = os.path.realpath( sys.argv[0] )
pilotParams.pilotScriptName = os.path.basename( pilotParams.pilotScript )
log.debug( 'PARAMETER [%s]' % ', '.join( map( str, pilotParams.optList ) ) )
log.info( "Executing commands: %s" % str( pilotParams.commands ) )
if pilotParams.commandExtensions:
log.info( "Requested command extensions: %s" % str( pilotParams.commandExtensions ) )
for commandName in pilotParams.commands:
command, module = getCommand( pilotParams, commandName, log )
if command is not None:
log.info( "Command %s instantiated from %s" % ( commandName, module ) )
command.execute()
else:
log.error( "Command %s could not be instantiated" % commandName )
sys.exit( -1 )
| gpl-3.0 |
Bakterija/mmplayer | mmplayer/other/garden_contextmenu/examples/simple_context_menu.py | 3 | 1977 | import kivy
from kivy.app import App
from kivy.lang import Builder
kivy.require('1.9.0')
import kivy.garden.contextmenu
kv = """
FloatLayout:
id: layout
Label:
pos: 10, self.parent.height - self.height - 10
text: "Left click anywhere outside the context menu to close it"
size_hint: None, None
size: self.texture_size
Button:
size_hint: None, None
pos_hint: {"center_x": 0.5, "center_y": 0.8 }
size: 300, 40
text: "Click me to show the context menu"
on_release: context_menu.show(*app.root_window.mouse_pos)
ContextMenu:
id: context_menu
visible: False
cancel_handler_widget: layout
ContextMenuTextItem:
text: "SubMenu #2"
ContextMenuTextItem:
text: "SubMenu #3"
ContextMenu:
ContextMenuTextItem:
text: "SubMenu #5"
ContextMenuTextItem:
text: "SubMenu #6"
ContextMenu:
ContextMenuTextItem:
text: "SubMenu #9"
ContextMenuTextItem:
text: "SubMenu #10"
ContextMenuTextItem:
text: "SubMenu #11"
ContextMenuTextItem:
text: "Hello, World!"
on_release: app.say_hello(self.text)
ContextMenuTextItem:
text: "SubMenu #12"
ContextMenuTextItem:
text: "SubMenu #7"
ContextMenuTextItem:
text: "SubMenu #4"
"""
class MyApp(App):
def build(self):
self.title = 'Simple context menu example'
return Builder.load_string(kv)
def say_hello(self, text):
print(text)
self.root.ids['context_menu'].hide()
if __name__ == '__main__':
MyApp().run() | mit |
nicolargo/intellij-community | plugins/hg4idea/testData/bin/mercurial/commands.py | 90 | 216701 | # commands.py - command processing for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, bin, nullid, nullrev, short
from lock import release
from i18n import _
import os, re, difflib, time, tempfile, errno
import hg, scmutil, util, revlog, copies, error, bookmarks
import patch, help, encoding, templatekw, discovery
import archival, changegroup, cmdutil, hbisect
import sshserver, hgweb, hgweb.server, commandserver
import merge as mergemod
import minirst, revset, fileset
import dagparser, context, simplemerge, graphmod
import random, setdiscovery, treediscovery, dagutil, pvec, localrepo
import phases, obsolete
table = {}
command = cmdutil.command(table)
# common command options
globalopts = [
('R', 'repository', '',
_('repository root directory or name of overlay bundle file'),
_('REPO')),
('', 'cwd', '',
_('change working directory'), _('DIR')),
('y', 'noninteractive', None,
_('do not prompt, automatically pick the first choice for all prompts')),
('q', 'quiet', None, _('suppress output')),
('v', 'verbose', None, _('enable additional output')),
('', 'config', [],
_('set/override config option (use \'section.name=value\')'),
_('CONFIG')),
('', 'debug', None, _('enable debugging output')),
('', 'debugger', None, _('start debugger')),
('', 'encoding', encoding.encoding, _('set the charset encoding'),
_('ENCODE')),
('', 'encodingmode', encoding.encodingmode,
_('set the charset encoding mode'), _('MODE')),
('', 'traceback', None, _('always print a traceback on exception')),
('', 'time', None, _('time how long the command takes')),
('', 'profile', None, _('print command execution profile')),
('', 'version', None, _('output version information and exit')),
('h', 'help', None, _('display help and exit')),
('', 'hidden', False, _('consider hidden changesets')),
]
dryrunopts = [('n', 'dry-run', None,
_('do not perform actions, just print output'))]
remoteopts = [
('e', 'ssh', '',
_('specify ssh command to use'), _('CMD')),
('', 'remotecmd', '',
_('specify hg command to run on the remote side'), _('CMD')),
('', 'insecure', None,
_('do not verify server certificate (ignoring web.cacerts config)')),
]
walkopts = [
('I', 'include', [],
_('include names matching the given patterns'), _('PATTERN')),
('X', 'exclude', [],
_('exclude names matching the given patterns'), _('PATTERN')),
]
commitopts = [
('m', 'message', '',
_('use text as commit message'), _('TEXT')),
('l', 'logfile', '',
_('read commit message from file'), _('FILE')),
]
commitopts2 = [
('d', 'date', '',
_('record the specified date as commit date'), _('DATE')),
('u', 'user', '',
_('record the specified user as committer'), _('USER')),
]
templateopts = [
('', 'style', '',
_('display using template map file'), _('STYLE')),
('', 'template', '',
_('display with template'), _('TEMPLATE')),
]
logopts = [
('p', 'patch', None, _('show patch')),
('g', 'git', None, _('use git extended diff format')),
('l', 'limit', '',
_('limit number of changes displayed'), _('NUM')),
('M', 'no-merges', None, _('do not show merges')),
('', 'stat', None, _('output diffstat-style summary of changes')),
('G', 'graph', None, _("show the revision DAG")),
] + templateopts
diffopts = [
('a', 'text', None, _('treat all files as text')),
('g', 'git', None, _('use git extended diff format')),
('', 'nodates', None, _('omit dates from diff headers'))
]
diffwsopts = [
('w', 'ignore-all-space', None,
_('ignore white space when comparing lines')),
('b', 'ignore-space-change', None,
_('ignore changes in the amount of white space')),
('B', 'ignore-blank-lines', None,
_('ignore changes whose lines are all blank')),
]
diffopts2 = [
('p', 'show-function', None, _('show which function each change is in')),
('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
('U', 'unified', '',
_('number of lines of context to show'), _('NUM')),
('', 'stat', None, _('output diffstat-style summary of changes')),
]
mergetoolopts = [
('t', 'tool', '', _('specify merge tool')),
]
similarityopts = [
('s', 'similarity', '',
_('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]
subrepoopts = [
('S', 'subrepos', None,
_('recurse into subrepositories'))
]
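# The option lists above are plain tuples of
# (shortname, longname, default, helptext[, metavar]) that commands
# concatenate to share flags; hyphenated long names arrive in **opts with
# underscores (e.g. --dry-run becomes opts['dry_run']). A minimal sketch of
# a hypothetical command composing the fragments (illustrative only, not
# part of this module):
#
#     @command('frobnicate',
#         walkopts + subrepoopts + dryrunopts,
#         _('[OPTION]... [FILE]...'))
#     def frobnicate(ui, repo, *pats, **opts):
#         """hypothetical command reusing the shared option fragments"""
#         if opts.get('dry_run'):
#             ui.status(_('(dry run)\n'))
#         m = scmutil.match(repo[None], pats, opts)
#         ui.write("%s\n" % m.files())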
# Commands start here, listed alphabetically
@command('^add',
walkopts + subrepoopts + dryrunopts,
_('[OPTION]... [FILE]...'))
def add(ui, repo, *pats, **opts):
"""add the specified files on the next commit
Schedule files to be version controlled and added to the
repository.
The files will be added to the repository at the next commit. To
undo an add before that, see :hg:`forget`.
If no names are given, add all files to the repository.
.. container:: verbose
An example showing how new (unknown) files are added
automatically by :hg:`add`::
$ ls
foo.c
$ hg status
? foo.c
$ hg add
adding foo.c
$ hg status
A foo.c
Returns 0 if all files are successfully added.
"""
m = scmutil.match(repo[None], pats, opts)
rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'),
opts.get('subrepos'), prefix="", explicitonly=False)
return rejected and 1 or 0
@command('addremove',
similarityopts + walkopts + dryrunopts,
_('[OPTION]... [FILE]...'))
def addremove(ui, repo, *pats, **opts):
"""add all new files, delete all missing files
Add all new files and remove all missing files from the
repository.
New files are ignored if they match any of the patterns in
``.hgignore``. As with add, these changes take effect at the next
commit.
Use the -s/--similarity option to detect renamed files. This
option takes a percentage between 0 (disabled) and 100 (files must
be identical) as its parameter. With a parameter greater than 0,
this compares every removed file with every added file and records
those similar enough as renames. Detecting renamed files this way
can be expensive. After using this option, :hg:`status -C` can be
used to check which files were identified as moved or renamed. If
not specified, -s/--similarity defaults to 100 and only renames of
identical files are detected.
Returns 0 if all files are successfully added.
"""
try:
sim = float(opts.get('similarity') or 100)
except ValueError:
raise util.Abort(_('similarity must be a number'))
if sim < 0 or sim > 100:
raise util.Abort(_('similarity must be between 0 and 100'))
return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
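# Illustrative note (not part of the original module): the --similarity
# percentage accepted above is scaled to the 0.0-1.0 ratio that
# scmutil.addremove expects:
#
#     for arg in ('0', '90', '100'):
#         sim = float(arg) / 100.0    # -> 0.0, 0.9, 1.0
#
# 0.0 disables rename detection, 0.9 pairs files that are 90% alike, and
# 1.0 (the default) only matches identical files.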
@command('^annotate|blame',
[('r', 'rev', '', _('annotate the specified revision'), _('REV')),
('', 'follow', None,
_('follow copies/renames and list the filename (DEPRECATED)')),
('', 'no-follow', None, _("don't follow copies and renames")),
('a', 'text', None, _('treat all files as text')),
('u', 'user', None, _('list the author (long with -v)')),
('f', 'file', None, _('list the filename')),
('d', 'date', None, _('list the date (short with -q)')),
('n', 'number', None, _('list the revision number (default)')),
('c', 'changeset', None, _('list the changeset')),
('l', 'line-number', None, _('show line number at the first appearance'))
] + diffwsopts + walkopts,
_('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
def annotate(ui, repo, *pats, **opts):
"""show changeset information by line for each file
    List changes in files, showing the revision id responsible for
    each line.
This command is useful for discovering when a change was made and
by whom.
Without the -a/--text option, annotate will avoid processing files
it detects as binary. With -a, annotate will annotate the file
anyway, although the results will probably be neither useful
nor desirable.
Returns 0 on success.
"""
if opts.get('follow'):
# --follow is deprecated and now just an alias for -f/--file
# to mimic the behavior of Mercurial before version 1.5
opts['file'] = True
datefunc = ui.quiet and util.shortdate or util.datestr
getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
if not pats:
raise util.Abort(_('at least one filename or pattern is required'))
hexfn = ui.debugflag and hex or short
opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
('number', ' ', lambda x: str(x[0].rev())),
('changeset', ' ', lambda x: hexfn(x[0].node())),
('date', ' ', getdate),
('file', ' ', lambda x: x[0].path()),
('line_number', ':', lambda x: str(x[1])),
]
if (not opts.get('user') and not opts.get('changeset')
and not opts.get('date') and not opts.get('file')):
opts['number'] = True
linenumber = opts.get('line_number') is not None
if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
raise util.Abort(_('at least one of -n/-c is required for -l'))
funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
def bad(x, y):
raise util.Abort("%s: %s" % (x, y))
ctx = scmutil.revsingle(repo, opts.get('rev'))
m = scmutil.match(ctx, pats, opts)
m.bad = bad
follow = not opts.get('no_follow')
diffopts = patch.diffopts(ui, opts, section='annotate')
for abs in ctx.walk(m):
fctx = ctx[abs]
if not opts.get('text') and util.binary(fctx.data()):
ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
continue
lines = fctx.annotate(follow=follow, linenumber=linenumber,
diffopts=diffopts)
pieces = []
for f, sep in funcmap:
l = [f(n) for n, dummy in lines]
if l:
sized = [(x, encoding.colwidth(x)) for x in l]
ml = max([w for x, w in sized])
pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
for x, w in sized])
if pieces:
for p, l in zip(zip(*pieces), lines):
ui.write("%s: %s" % ("".join(p), l[1]))
if lines and not lines[-1][1].endswith('\n'):
ui.write('\n')
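# Illustrative sketch (not part of the original module): each annotate
# column is right-padded to the widest entry using display width
# (encoding.colwidth) rather than len(), so double-width characters still
# align. The padding step in isolation, with len() standing in for
# colwidth():
#
#     vals = ['alice', 'bo']
#     sized = [(x, len(x)) for x in vals]
#     ml = max(w for x, w in sized)
#     padded = ['%s%s' % (' ' * (ml - w), x) for x, w in sized]
#     # -> ['alice', '   bo']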
@command('archive',
[('', 'no-decode', None, _('do not pass files through decoders')),
('p', 'prefix', '', _('directory prefix for files in archive'),
_('PREFIX')),
('r', 'rev', '', _('revision to distribute'), _('REV')),
('t', 'type', '', _('type of distribution to create'), _('TYPE')),
] + subrepoopts + walkopts,
_('[OPTION]... DEST'))
def archive(ui, repo, dest, **opts):
'''create an unversioned archive of a repository revision
By default, the revision used is the parent of the working
directory; use -r/--rev to specify a different revision.
The archive type is automatically detected based on file
extension (or override using -t/--type).
.. container:: verbose
Examples:
- create a zip file containing the 1.0 release::
hg archive -r 1.0 project-1.0.zip
- create a tarball excluding .hg files::
hg archive project.tar.gz -X ".hg*"
Valid types are:
:``files``: a directory full of files (default)
:``tar``: tar archive, uncompressed
:``tbz2``: tar archive, compressed using bzip2
:``tgz``: tar archive, compressed using gzip
:``uzip``: zip archive, uncompressed
:``zip``: zip archive, compressed using deflate
The exact name of the destination archive or directory is given
using a format string; see :hg:`help export` for details.
Each member added to an archive file has a directory prefix
prepended. Use -p/--prefix to specify a format string for the
prefix. The default is the basename of the archive, with suffixes
removed.
Returns 0 on success.
'''
ctx = scmutil.revsingle(repo, opts.get('rev'))
if not ctx:
raise util.Abort(_('no working directory: please specify a revision'))
node = ctx.node()
dest = cmdutil.makefilename(repo, dest, node)
if os.path.realpath(dest) == repo.root:
raise util.Abort(_('repository root cannot be destination'))
kind = opts.get('type') or archival.guesskind(dest) or 'files'
prefix = opts.get('prefix')
if dest == '-':
if kind == 'files':
raise util.Abort(_('cannot archive plain files to stdout'))
dest = cmdutil.makefileobj(repo, dest)
if not prefix:
prefix = os.path.basename(repo.root) + '-%h'
prefix = cmdutil.makefilename(repo, prefix, node)
matchfn = scmutil.match(ctx, [], opts)
archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
matchfn, prefix, subrepos=opts.get('subrepos'))
@command('backout',
[('', 'merge', None, _('merge with old dirstate parent after backout')),
('', 'parent', '',
_('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
('r', 'rev', '', _('revision to backout'), _('REV')),
] + mergetoolopts + walkopts + commitopts + commitopts2,
_('[OPTION]... [-r] REV'))
def backout(ui, repo, node=None, rev=None, **opts):
'''reverse effect of earlier changeset
Prepare a new changeset with the effect of REV undone in the
current working directory.
If REV is the parent of the working directory, then this new changeset
is committed automatically. Otherwise, hg needs to merge the
changes and the merged result is left uncommitted.
.. note::
backout cannot be used to fix either an unwanted or
incorrect merge.
.. container:: verbose
By default, the pending changeset will have one parent,
maintaining a linear history. With --merge, the pending
changeset will instead have two parents: the old parent of the
working directory and a new child of REV that simply undoes REV.
Before version 1.7, the behavior without --merge was equivalent
to specifying --merge followed by :hg:`update --clean .` to
cancel the merge and leave the child of REV as a head to be
merged separately.
See :hg:`help dates` for a list of formats valid for -d/--date.
Returns 0 on success.
'''
if rev and node:
raise util.Abort(_("please specify just one revision"))
if not rev:
rev = node
if not rev:
raise util.Abort(_("please specify a revision to backout"))
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
cmdutil.bailifchanged(repo)
node = scmutil.revsingle(repo, rev).node()
op1, op2 = repo.dirstate.parents()
a = repo.changelog.ancestor(op1, node)
if a != node:
raise util.Abort(_('cannot backout change on a different branch'))
p1, p2 = repo.changelog.parents(node)
if p1 == nullid:
raise util.Abort(_('cannot backout a change with no parents'))
if p2 != nullid:
if not opts.get('parent'):
raise util.Abort(_('cannot backout a merge changeset'))
p = repo.lookup(opts['parent'])
if p not in (p1, p2):
raise util.Abort(_('%s is not a parent of %s') %
(short(p), short(node)))
parent = p
else:
if opts.get('parent'):
raise util.Abort(_('cannot use --parent on non-merge changeset'))
parent = p1
# the backout should appear on the same branch
wlock = repo.wlock()
try:
branch = repo.dirstate.branch()
bheads = repo.branchheads(branch)
hg.clean(repo, node, show_stats=False)
repo.dirstate.setbranch(branch)
rctx = scmutil.revsingle(repo, hex(parent))
cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
if not opts.get('merge') and op1 != node:
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
return hg.update(repo, op1)
finally:
ui.setconfig('ui', 'forcemerge', '')
e = cmdutil.commiteditor
if not opts['message'] and not opts['logfile']:
# we don't translate commit messages
opts['message'] = "Backed out changeset %s" % short(node)
e = cmdutil.commitforceeditor
def commitfunc(ui, repo, message, match, opts):
return repo.commit(message, opts.get('user'), opts.get('date'),
match, editor=e)
newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
cmdutil.commitstatus(repo, newnode, branch, bheads)
def nice(node):
return '%d:%s' % (repo.changelog.rev(node), short(node))
ui.status(_('changeset %s backs out changeset %s\n') %
(nice(repo.changelog.tip()), nice(node)))
if opts.get('merge') and op1 != node:
hg.clean(repo, op1, show_stats=False)
ui.status(_('merging with changeset %s\n')
% nice(repo.changelog.tip()))
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
return hg.merge(repo, hex(repo.changelog.tip()))
finally:
ui.setconfig('ui', 'forcemerge', '')
finally:
wlock.release()
return 0
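# Illustrative usage (assumed session, not from this module): backing out
# a changeset other than the working directory parent leaves the merged
# result to commit by hand:
#
#     $ hg backout -r 23           # reverse changeset 23 in the working dir
#     $ hg commit -m 'backout 23'  # commit the merged result
#
# whereas backing out the working directory parent commits automatically.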
@command('bisect',
[('r', 'reset', False, _('reset bisect state')),
('g', 'good', False, _('mark changeset good')),
('b', 'bad', False, _('mark changeset bad')),
('s', 'skip', False, _('skip testing changeset')),
('e', 'extend', False, _('extend the bisect range')),
('c', 'command', '', _('use command to check changeset state'), _('CMD')),
('U', 'noupdate', False, _('do not update to target'))],
_("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
reset=None, good=None, bad=None, skip=None, extend=None,
noupdate=None):
"""subdivision search of changesets
This command helps to find changesets which introduce problems. To
use, mark the earliest changeset you know exhibits the problem as
bad, then mark the latest changeset which is free from the problem
as good. Bisect will update your working directory to a revision
for testing (unless the -U/--noupdate option is specified). Once
you have performed tests, mark the working directory as good or
bad, and bisect will either update to another candidate changeset
or announce that it has found the bad revision.
As a shortcut, you can also use the revision argument to mark a
revision as good or bad without checking it out first.
If you supply a command, it will be used for automatic bisection.
The environment variable HG_NODE will contain the ID of the
changeset being tested. The exit status of the command will be
used to mark revisions as good or bad: status 0 means good, 125
means to skip the revision, 127 (command not found) will abort the
bisection, and any other non-zero exit status means the revision
is bad.
.. container:: verbose
Some examples:
      - start a bisection with known bad revision 34, and good revision 12::
hg bisect --bad 34
hg bisect --good 12
- advance the current bisection by marking current revision as good or
bad::
hg bisect --good
hg bisect --bad
- mark the current revision, or a known revision, to be skipped (e.g. if
that revision is not usable because of another issue)::
hg bisect --skip
hg bisect --skip 23
      - skip all revisions that do not touch directories ``foo`` or ``bar``::
hg bisect --skip '!( file("path:foo") & file("path:bar") )'
- forget the current bisection::
hg bisect --reset
- use 'make && make tests' to automatically find the first broken
revision::
hg bisect --reset
hg bisect --bad 34
hg bisect --good 12
hg bisect --command 'make && make tests'
- see all changesets whose states are already known in the current
bisection::
hg log -r "bisect(pruned)"
- see the changeset currently being bisected (especially useful
if running with -U/--noupdate)::
hg log -r "bisect(current)"
- see all changesets that took part in the current bisection::
hg log -r "bisect(range)"
- with the graphlog extension, you can even get a nice graph::
hg log --graph -r "bisect(range)"
See :hg:`help revsets` for more about the `bisect()` keyword.
Returns 0 on success.
"""
def extendbisectrange(nodes, good):
# bisect is incomplete when it ends on a merge node and
# one of the parent was not checked.
parents = repo[nodes[0]].parents()
if len(parents) > 1:
side = good and state['bad'] or state['good']
num = len(set(i.node() for i in parents) & set(side))
if num == 1:
return parents[0].ancestor(parents[1])
return None
def print_result(nodes, good):
displayer = cmdutil.show_changeset(ui, repo, {})
if len(nodes) == 1:
# narrowed it down to a single revision
if good:
ui.write(_("The first good revision is:\n"))
else:
ui.write(_("The first bad revision is:\n"))
displayer.show(repo[nodes[0]])
extendnode = extendbisectrange(nodes, good)
if extendnode is not None:
ui.write(_('Not all ancestors of this changeset have been'
' checked.\nUse bisect --extend to continue the '
'bisection from\nthe common ancestor, %s.\n')
% extendnode)
else:
# multiple possible revisions
if good:
ui.write(_("Due to skipped revisions, the first "
"good revision could be any of:\n"))
else:
ui.write(_("Due to skipped revisions, the first "
"bad revision could be any of:\n"))
for n in nodes:
displayer.show(repo[n])
displayer.close()
def check_state(state, interactive=True):
if not state['good'] or not state['bad']:
if (good or bad or skip or reset) and interactive:
return
if not state['good']:
raise util.Abort(_('cannot bisect (no known good revisions)'))
else:
raise util.Abort(_('cannot bisect (no known bad revisions)'))
return True
# backward compatibility
if rev in "good bad reset init".split():
ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
cmd, rev, extra = rev, extra, None
if cmd == "good":
good = True
elif cmd == "bad":
bad = True
else:
reset = True
elif extra or good + bad + skip + reset + extend + bool(command) > 1:
raise util.Abort(_('incompatible arguments'))
if reset:
p = repo.join("bisect.state")
if os.path.exists(p):
os.unlink(p)
return
state = hbisect.load_state(repo)
if command:
changesets = 1
try:
node = state['current'][0]
except LookupError:
if noupdate:
raise util.Abort(_('current bisect revision is unknown - '
'start a new bisect to fix'))
node, p2 = repo.dirstate.parents()
if p2 != nullid:
raise util.Abort(_('current bisect revision is a merge'))
try:
while changesets:
# update state
state['current'] = [node]
hbisect.save_state(repo, state)
status = util.system(command,
environ={'HG_NODE': hex(node)},
out=ui.fout)
if status == 125:
transition = "skip"
elif status == 0:
transition = "good"
# status < 0 means process was killed
elif status == 127:
raise util.Abort(_("failed to execute %s") % command)
elif status < 0:
raise util.Abort(_("%s killed") % command)
else:
transition = "bad"
ctx = scmutil.revsingle(repo, rev, node)
rev = None # clear for future iterations
state[transition].append(ctx.node())
ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
check_state(state, interactive=False)
# bisect
nodes, changesets, good = hbisect.bisect(repo.changelog, state)
# update to next check
node = nodes[0]
if not noupdate:
cmdutil.bailifchanged(repo)
hg.clean(repo, node, show_stats=False)
finally:
state['current'] = [node]
hbisect.save_state(repo, state)
print_result(nodes, good)
return
# update state
if rev:
nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
else:
nodes = [repo.lookup('.')]
if good or bad or skip:
if good:
state['good'] += nodes
elif bad:
state['bad'] += nodes
elif skip:
state['skip'] += nodes
hbisect.save_state(repo, state)
if not check_state(state):
return
# actually bisect
nodes, changesets, good = hbisect.bisect(repo.changelog, state)
if extend:
if not changesets:
extendnode = extendbisectrange(nodes, good)
if extendnode is not None:
ui.write(_("Extending search to changeset %d:%s\n"
% (extendnode.rev(), extendnode)))
state['current'] = [extendnode.node()]
hbisect.save_state(repo, state)
if noupdate:
return
cmdutil.bailifchanged(repo)
return hg.clean(repo, extendnode.node())
raise util.Abort(_("nothing to extend"))
if changesets == 0:
print_result(nodes, good)
else:
assert len(nodes) == 1 # only a single node can be tested next
node = nodes[0]
# compute the approximate number of remaining tests
tests, size = 0, 2
while size <= changesets:
tests, size = tests + 1, size * 2
rev = repo.changelog.rev(node)
ui.write(_("Testing changeset %d:%s "
"(%d changesets remaining, ~%d tests)\n")
% (rev, short(node), changesets, tests))
state['current'] = [node]
hbisect.save_state(repo, state)
if not noupdate:
cmdutil.bailifchanged(repo)
return hg.clean(repo, node)
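# Illustrative sketch (not part of the original module): the "~%d tests"
# estimate printed above is roughly log2 of the remaining changesets,
# computed by repeated doubling. As a hypothetical standalone helper:
#
#     def estimatetests(changesets):
#         tests, size = 0, 2
#         while size <= changesets:
#             tests, size = tests + 1, size * 2
#         return tests
#
#     # estimatetests(5) == 2, estimatetests(100) == 6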
@command('bookmarks|bookmark',
[('f', 'force', False, _('force')),
('r', 'rev', '', _('revision'), _('REV')),
('d', 'delete', False, _('delete a given bookmark')),
('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
('i', 'inactive', False, _('mark a bookmark inactive'))],
_('hg bookmarks [-f] [-d] [-i] [-m NAME] [-r REV] [NAME]'))
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False,
rename=None, inactive=False):
'''track a line of development with movable markers
Bookmarks are pointers to certain commits that move when committing.
Bookmarks are local. They can be renamed, copied and deleted. It is
possible to use :hg:`merge NAME` to merge from a given bookmark, and
:hg:`update NAME` to update to a given bookmark.
You can use :hg:`bookmark NAME` to set a bookmark on the working
directory's parent revision with the given name. If you specify
a revision using -r REV (where REV may be an existing bookmark),
the bookmark is assigned to that revision.
Bookmarks can be pushed and pulled between repositories (see :hg:`help
push` and :hg:`help pull`). This requires both the local and remote
repositories to support bookmarks. For versions prior to 1.8, this means
the bookmarks extension must be enabled.
If you set a bookmark called '@', new clones of the repository will
have that revision checked out (and the bookmark made active) by
default.
With -i/--inactive, the new bookmark will not be made the active
bookmark. If -r/--rev is given, the new bookmark will not be made
active even if -i/--inactive is not given. If no NAME is given, the
current active bookmark will be marked inactive.
'''
hexfn = ui.debugflag and hex or short
marks = repo._bookmarks
cur = repo.changectx('.').node()
def checkformat(mark):
mark = mark.strip()
if not mark:
raise util.Abort(_("bookmark names cannot consist entirely of "
"whitespace"))
scmutil.checknewlabel(repo, mark, 'bookmark')
return mark
def checkconflict(repo, mark, force=False, target=None):
if mark in marks and not force:
if target:
if marks[mark] == target and target == cur:
# re-activating a bookmark
return
anc = repo.changelog.ancestors([repo[target].rev()])
bmctx = repo[marks[mark]]
divs = [repo[b].node() for b in marks
if b.split('@', 1)[0] == mark.split('@', 1)[0]]
# allow resolving a single divergent bookmark even if moving
# the bookmark across branches when a revision is specified
# that contains a divergent bookmark
if bmctx.rev() not in anc and target in divs:
bookmarks.deletedivergent(repo, [target], mark)
return
deletefrom = [b for b in divs
if repo[b].rev() in anc or b == target]
bookmarks.deletedivergent(repo, deletefrom, mark)
if bmctx.rev() in anc:
ui.status(_("moving bookmark '%s' forward from %s\n") %
(mark, short(bmctx.node())))
return
raise util.Abort(_("bookmark '%s' already exists "
"(use -f to force)") % mark)
if ((mark in repo.branchmap() or mark == repo.dirstate.branch())
and not force):
raise util.Abort(
_("a bookmark cannot have the name of an existing branch"))
if delete and rename:
raise util.Abort(_("--delete and --rename are incompatible"))
if delete and rev:
raise util.Abort(_("--rev is incompatible with --delete"))
if rename and rev:
raise util.Abort(_("--rev is incompatible with --rename"))
if mark is None and (delete or rev):
raise util.Abort(_("bookmark name required"))
if delete:
if mark not in marks:
raise util.Abort(_("bookmark '%s' does not exist") % mark)
if mark == repo._bookmarkcurrent:
bookmarks.setcurrent(repo, None)
del marks[mark]
marks.write()
elif rename:
if mark is None:
raise util.Abort(_("new bookmark name required"))
mark = checkformat(mark)
if rename not in marks:
raise util.Abort(_("bookmark '%s' does not exist") % rename)
checkconflict(repo, mark, force)
marks[mark] = marks[rename]
if repo._bookmarkcurrent == rename and not inactive:
bookmarks.setcurrent(repo, mark)
del marks[rename]
marks.write()
elif mark is not None:
mark = checkformat(mark)
if inactive and mark == repo._bookmarkcurrent:
bookmarks.setcurrent(repo, None)
return
tgt = cur
if rev:
tgt = scmutil.revsingle(repo, rev).node()
checkconflict(repo, mark, force, tgt)
marks[mark] = tgt
if not inactive and cur == marks[mark] and not rev:
bookmarks.setcurrent(repo, mark)
elif cur != tgt and mark == repo._bookmarkcurrent:
bookmarks.setcurrent(repo, None)
marks.write()
# Same message whether trying to deactivate the current bookmark (-i
# with no NAME) or listing bookmarks
elif len(marks) == 0:
ui.status(_("no bookmarks set\n"))
elif inactive:
if not repo._bookmarkcurrent:
ui.status(_("no active bookmark\n"))
else:
bookmarks.setcurrent(repo, None)
else: # show bookmarks
for bmark, n in sorted(marks.iteritems()):
current = repo._bookmarkcurrent
if bmark == current:
prefix, label = '*', 'bookmarks.current'
else:
prefix, label = ' ', ''
if ui.quiet:
ui.write("%s\n" % bmark, label=label)
else:
ui.write(" %s %-25s %d:%s\n" % (
prefix, bmark, repo.changelog.rev(n), hexfn(n)),
label=label)
@command('branch',
[('f', 'force', None,
_('set branch name even if it shadows an existing branch')),
('C', 'clean', None, _('reset branch name to parent branch name'))],
_('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
"""set or show the current branch name
.. note::
Branch names are permanent and global. Use :hg:`bookmark` to create a
light-weight bookmark instead. See :hg:`help glossary` for more
information about named branches and bookmarks.
With no argument, show the current branch name. With one argument,
set the working directory branch name (the branch will not exist
in the repository until the next commit). Standard practice
recommends that primary development take place on the 'default'
branch.
Unless -f/--force is specified, branch will not let you set a
branch name that already exists, even if it's inactive.
Use -C/--clean to reset the working directory branch to that of
the parent of the working directory, negating a previous branch
change.
Use the command :hg:`update` to switch to an existing branch. Use
:hg:`commit --close-branch` to mark this branch as closed.
Returns 0 on success.
"""
if label:
label = label.strip()
if not opts.get('clean') and not label:
ui.write("%s\n" % repo.dirstate.branch())
return
wlock = repo.wlock()
try:
if opts.get('clean'):
label = repo[None].p1().branch()
repo.dirstate.setbranch(label)
ui.status(_('reset working directory to branch %s\n') % label)
elif label:
if not opts.get('force') and label in repo.branchmap():
if label not in [p.branch() for p in repo.parents()]:
raise util.Abort(_('a branch of the same name already'
' exists'),
# i18n: "it" refers to an existing branch
hint=_("use 'hg update' to switch to it"))
scmutil.checknewlabel(repo, label, 'branch')
repo.dirstate.setbranch(label)
ui.status(_('marked working directory as branch %s\n') % label)
ui.status(_('(branches are permanent and global, '
'did you want a bookmark?)\n'))
finally:
wlock.release()
@command('branches',
[('a', 'active', False, _('show only branches that have unmerged heads')),
('c', 'closed', False, _('show normal and closed branches'))],
_('[-ac]'))
def branches(ui, repo, active=False, closed=False):
"""list repository named branches
List the repository's named branches, indicating which ones are
inactive. If -c/--closed is specified, also list branches which have
been marked closed (see :hg:`commit --close-branch`).
If -a/--active is specified, only show active branches. A branch
is considered active if it contains repository heads.
Use the command :hg:`update` to switch to an existing branch.
Returns 0.
"""
hexfunc = ui.debugflag and hex or short
activebranches = set([repo[n].branch() for n in repo.heads()])
branches = []
for tag, heads in repo.branchmap().iteritems():
for h in reversed(heads):
ctx = repo[h]
isopen = not ctx.closesbranch()
if isopen:
tip = ctx
break
else:
tip = repo[heads[-1]]
isactive = tag in activebranches and isopen
branches.append((tip, isactive, isopen))
branches.sort(key=lambda i: (i[1], i[0].rev(), i[0].branch(), i[2]),
reverse=True)
for ctx, isactive, isopen in branches:
if (not active) or isactive:
if isactive:
label = 'branches.active'
notice = ''
elif not isopen:
if not closed:
continue
label = 'branches.closed'
notice = _(' (closed)')
else:
label = 'branches.inactive'
notice = _(' (inactive)')
if ctx.branch() == repo.dirstate.branch():
label = 'branches.current'
rev = str(ctx.rev()).rjust(31 - encoding.colwidth(ctx.branch()))
rev = ui.label('%s:%s' % (rev, hexfunc(ctx.node())),
'log.changeset changeset.%s' % ctx.phasestr())
tag = ui.label(ctx.branch(), label)
if ui.quiet:
ui.write("%s\n" % tag)
else:
ui.write("%s %s%s\n" % (tag, rev, notice))
@command('bundle',
[('f', 'force', None, _('run even when the destination is unrelated')),
('r', 'rev', [], _('a changeset intended to be added to the destination'),
_('REV')),
('b', 'branch', [], _('a specific branch you would like to bundle'),
_('BRANCH')),
('', 'base', [],
_('a base changeset assumed to be available at the destination'),
_('REV')),
('a', 'all', None, _('bundle all changesets in the repository')),
('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
] + remoteopts,
_('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
"""create a changegroup file
Generate a compressed changegroup file collecting changesets not
known to be in another repository.
If you omit the destination repository, then hg assumes the
destination will have all the nodes you specify with --base
parameters. To create a bundle containing all changesets, use
-a/--all (or --base null).
You can change compression method with the -t/--type option.
The available compression methods are: none, bzip2, and
gzip (by default, bundles are compressed using bzip2).
The bundle file can then be transferred using conventional means
and applied to another repository with the unbundle or pull
command. This is useful when direct push and pull are not
available or when exporting an entire repository is undesirable.
Applying bundles preserves all changeset contents including
permissions, copy/rename information, and revision history.
Returns 0 on success, 1 if no changes found.
"""
revs = None
if 'rev' in opts:
revs = scmutil.revrange(repo, opts['rev'])
bundletype = opts.get('type', 'bzip2').lower()
btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
bundletype = btypes.get(bundletype)
if bundletype not in changegroup.bundletypes:
raise util.Abort(_('unknown bundle type specified with --type'))
if opts.get('all'):
base = ['null']
else:
base = scmutil.revrange(repo, opts.get('base'))
if base:
if dest:
raise util.Abort(_("--base is incompatible with specifying "
"a destination"))
common = [repo.lookup(rev) for rev in base]
heads = revs and map(repo.lookup, revs) or revs
cg = repo.getbundle('bundle', heads=heads, common=common)
outgoing = None
else:
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest, opts.get('branch'))
other = hg.peer(repo, opts, dest)
revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
heads = revs and map(repo.lookup, revs) or revs
outgoing = discovery.findcommonoutgoing(repo, other,
onlyheads=heads,
force=opts.get('force'),
portable=True)
cg = repo.getlocalbundle('bundle', outgoing)
if not cg:
scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
return 1
changegroup.writebundle(cg, fname, bundletype)
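# Illustrative note (not part of the original module): the user-facing
# compression names are lower-cased and mapped onto wire-format headers
# before validation against changegroup.bundletypes:
#
#     btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
#     btypes.get('GZIP'.lower())   # -> 'HG10GZ'
#     btypes.get('lzma')           # -> None, rejected with util.Abort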
@command('cat',
[('o', 'output', '',
_('print output to file with formatted name'), _('FORMAT')),
('r', 'rev', '', _('print the given revision'), _('REV')),
('', 'decode', None, _('apply any matching decode filter')),
] + walkopts,
_('[OPTION]... FILE...'))
def cat(ui, repo, file1, *pats, **opts):
"""output the current or given revision of files
Print the specified files as they were at the given revision. If
no revision is given, the parent of the working directory is used,
or tip if no revision is checked out.
Output may be to a file, in which case the name of the file is
given using a format string. The formatting rules are the same as
for the export command, with the following additions:
:``%s``: basename of file being printed
:``%d``: dirname of file being printed, or '.' if in repository root
:``%p``: root-relative path name of file being printed
Returns 0 on success.
"""
ctx = scmutil.revsingle(repo, opts.get('rev'))
err = 1
m = scmutil.match(ctx, (file1,) + pats, opts)
for abs in ctx.walk(m):
fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
pathname=abs)
data = ctx[abs].data()
if opts.get('decode'):
data = repo.wwritedata(abs, data)
fp.write(data)
fp.close()
err = 0
return err
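# Illustrative note (assumed filenames, not from this module): with
# -o/--output each matched file goes through cmdutil.makefileobj, which
# expands the format string per file. For a tracked file src/main.c:
#
#     # %s -> 'main.c'      (basename)
#     # %d -> 'src'         (dirname, '.' at the repository root)
#     # %p -> 'src/main.c'  (root-relative path)
#     # so:  hg cat -o '%d-%s.txt' src/main.c   writes src-main.c.txt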
@command('^clone',
[('U', 'noupdate', None,
_('the clone will include an empty working copy (only a repository)')),
('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
('r', 'rev', [], _('include the specified changeset'), _('REV')),
('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
('', 'pull', None, _('use pull protocol to copy metadata')),
('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
] + remoteopts,
_('[OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
"""make a copy of an existing repository
Create a copy of an existing repository in a new directory.
If no destination directory name is specified, it defaults to the
basename of the source.
The location of the source is added to the new repository's
``.hg/hgrc`` file, as the default to be used for future pulls.
Only local paths and ``ssh://`` URLs are supported as
destinations. For ``ssh://`` destinations, no working directory or
``.hg/hgrc`` will be created on the remote side.
To pull only a subset of changesets, specify one or more revisions
identifiers with -r/--rev or branches with -b/--branch. The
resulting clone will contain only the specified changesets and
their ancestors. These options (or 'clone src#rev dest') imply
--pull, even for local source repositories. Note that specifying a
tag will include the tagged changeset but not the changeset
containing the tag.
If the source repository has a bookmark called '@' set, that
revision will be checked out in the new repository by default.
    To check out a particular version, use -u/--updaterev, or
-U/--noupdate to create a clone with no working directory.
.. container:: verbose
For efficiency, hardlinks are used for cloning whenever the
source and destination are on the same filesystem (note this
applies only to the repository data, not to the working
directory). Some filesystems, such as AFS, implement hardlinking
incorrectly, but do not report errors. In these cases, use the
--pull option to avoid hardlinking.
In some cases, you can clone repositories and the working
directory using full hardlinks with ::
$ cp -al REPO REPOCLONE
This is the fastest way to clone, but it is not always safe. The
operation is not atomic (making sure REPO is not modified during
the operation is up to you) and you have to make sure your
editor breaks hardlinks (Emacs and most Linux Kernel tools do
so). Also, this is not compatible with certain extensions that
place their metadata under the .hg directory, such as mq.
Mercurial will update the working directory to the first applicable
revision from this list:
a) null if -U or the source repository has no changesets
b) if -u . and the source repository is local, the first parent of
the source repository's working directory
c) the changeset specified with -u (if a branch name, this means the
latest head of that branch)
d) the changeset specified with -r
e) the tipmost head specified with -b
f) the tipmost head specified with the url#branch source syntax
g) the revision marked with the '@' bookmark, if present
h) the tipmost head of the default branch
i) tip
Examples:
- clone a remote repository to a new directory named hg/::
hg clone http://selenic.com/hg
- create a lightweight local clone::
hg clone project/ project-feature/
- clone from an absolute path on an ssh server (note double-slash)::
hg clone ssh://user@server//home/projects/alpha/
- do a high-speed clone over a LAN while checking out a
specified version::
hg clone --uncompressed http://server/repo -u 1.5
- create a repository without changesets after a particular revision::
hg clone -r 04e544 experimental/ good/
- clone (and track) a particular named branch::
hg clone http://selenic.com/hg#stable
See :hg:`help urls` for details on specifying URLs.
Returns 0 on success.
"""
if opts.get('noupdate') and opts.get('updaterev'):
raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
r = hg.clone(ui, opts, source, dest,
pull=opts.get('pull'),
stream=opts.get('uncompressed'),
rev=opts.get('rev'),
update=opts.get('updaterev') or not opts.get('noupdate'),
branch=opts.get('branch'))
return r is None
@command('^commit|ci',
[('A', 'addremove', None,
_('mark new/missing files as added/removed before committing')),
('', 'close-branch', None,
_('mark a branch as closed, hiding it from the branch list')),
('', 'amend', None, _('amend the parent of the working dir')),
] + walkopts + commitopts + commitopts2 + subrepoopts,
_('[OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
"""commit the specified files or all outstanding changes
Commit changes to the given files into the repository. Unlike a
centralized SCM, this operation is a local operation. See
:hg:`push` for a way to actively distribute your changes.
If a list of files is omitted, all changes reported by :hg:`status`
will be committed.
If you are committing the result of a merge, do not provide any
filenames or -I/-X filters.
If no commit message is specified, Mercurial starts your
configured editor where you can enter a message. In case your
commit fails, you will find a backup of your message in
``.hg/last-message.txt``.
The --amend flag can be used to amend the parent of the
working directory with a new commit that contains the changes
in the parent in addition to those currently reported by :hg:`status`,
if there are any. The old commit is stored in a backup bundle in
``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
on how to restore it).
Message, user and date are taken from the amended commit unless
specified. When a message isn't specified on the command line,
the editor will open with the message of the amended commit.
It is not possible to amend public changesets (see :hg:`help phases`)
or changesets that have children.
See :hg:`help dates` for a list of formats valid for -d/--date.
Returns 0 on success, 1 if nothing changed.
"""
if opts.get('subrepos'):
if opts.get('amend'):
raise util.Abort(_('cannot amend with --subrepos'))
# Let --subrepos on the command line override config setting.
ui.setconfig('ui', 'commitsubrepos', True)
if repo.vfs.exists('graftstate'):
raise util.Abort(_('cannot commit an interrupted graft operation'),
hint=_('use "hg graft -c" to continue graft'))
extra = {}
if opts.get('close_branch'):
extra['close'] = 1
branch = repo[None].branch()
bheads = repo.branchheads(branch)
if opts.get('amend'):
if ui.configbool('ui', 'commitsubrepos'):
raise util.Abort(_('cannot amend with ui.commitsubrepos enabled'))
old = repo['.']
if old.phase() == phases.public:
raise util.Abort(_('cannot amend public changesets'))
if len(repo[None].parents()) > 1:
raise util.Abort(_('cannot amend while merging'))
if (not obsolete._enabled) and old.children():
raise util.Abort(_('cannot amend changeset with children'))
e = cmdutil.commiteditor
if opts.get('force_editor'):
e = cmdutil.commitforceeditor
def commitfunc(ui, repo, message, match, opts):
editor = e
# message contains text from -m or -l, if it's empty,
# open the editor with the old message
if not message:
message = old.description()
editor = cmdutil.commitforceeditor
return repo.commit(message,
opts.get('user') or old.user(),
opts.get('date') or old.date(),
match,
editor=editor,
extra=extra)
current = repo._bookmarkcurrent
marks = old.bookmarks()
node = cmdutil.amend(ui, repo, commitfunc, old, extra, pats, opts)
if node == old.node():
ui.status(_("nothing changed\n"))
return 1
elif marks:
ui.debug('moving bookmarks %r from %s to %s\n' %
(marks, old.hex(), hex(node)))
newmarks = repo._bookmarks
for bm in marks:
newmarks[bm] = node
if bm == current:
bookmarks.setcurrent(repo, bm)
newmarks.write()
else:
e = cmdutil.commiteditor
if opts.get('force_editor'):
e = cmdutil.commitforceeditor
def commitfunc(ui, repo, message, match, opts):
return repo.commit(message, opts.get('user'), opts.get('date'),
match, editor=e, extra=extra)
node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
if not node:
stat = repo.status(match=scmutil.match(repo[None], pats, opts))
if stat[3]:
ui.status(_("nothing changed (%d missing files, see "
"'hg status')\n") % len(stat[3]))
else:
ui.status(_("nothing changed\n"))
return 1
cmdutil.commitstatus(repo, node, branch, bheads, opts)
@command('copy|cp',
[('A', 'after', None, _('record a copy that has already occurred')),
('f', 'force', None, _('forcibly copy over an existing managed file')),
] + walkopts + dryrunopts,
_('[OPTION]... [SOURCE]... DEST'))
def copy(ui, repo, *pats, **opts):
"""mark files as copied for the next commit
Mark dest as having copies of source files. If dest is a
directory, copies are put in that directory. If dest is a file,
the source must be a single file.
By default, this command copies the contents of files as they
exist in the working directory. If invoked with -A/--after, the
operation is recorded, but no copying is performed.
This command takes effect with the next commit. To undo a copy
before that, see :hg:`revert`.
Returns 0 on success, 1 if errors are encountered.
"""
wlock = repo.wlock(False)
try:
return cmdutil.copy(ui, repo, pats, opts)
finally:
wlock.release()
@command('debugancestor', [], _('[INDEX] REV1 REV2'))
def debugancestor(ui, repo, *args):
"""find the ancestor revision of two revisions in a given index"""
if len(args) == 3:
index, rev1, rev2 = args
r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
lookup = r.lookup
elif len(args) == 2:
if not repo:
raise util.Abort(_("there is no Mercurial repository here "
"(.hg not found)"))
rev1, rev2 = args
r = repo.changelog
lookup = repo.lookup
else:
raise util.Abort(_('either two or three arguments required'))
a = r.ancestor(lookup(rev1), lookup(rev2))
ui.write("%d:%s\n" % (r.rev(a), hex(a)))
@command('debugbuilddag',
[('m', 'mergeable-file', None, _('add single file mergeable changes')),
('o', 'overwritten-file', None, _('add single file all revs overwrite')),
('n', 'new-file', None, _('add new file at each rev'))],
_('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
mergeable_file=False,
overwritten_file=False,
new_file=False):
"""builds a repo with a given DAG from scratch in the current empty repo
The description of the DAG is read from stdin if not given on the
command line.
Elements:
- "+n" is a linear run of n nodes based on the current default parent
- "." is a single node based on the current default parent
- "$" resets the default parent to null (implied at the start);
otherwise the default parent is always the last node created
- "<p" sets the default parent to the backref p
- "*p" is a fork at parent p, which is a backref
- "*p1/p2" is a merge of parents p1 and p2, which are backrefs
- "/p2" is a merge of the preceding node and p2
- ":tag" defines a local tag for the preceding node
- "@branch" sets the named branch for subsequent nodes
- "#...\\n" is a comment up to the end of the line
Whitespace between the above elements is ignored.
A backref is either
- a number n, which references the node curr-n, where curr is the current
node, or
- the name of a local tag you placed earlier using ":tag", or
- empty to denote the default parent.
All string valued-elements are either strictly alphanumeric, or must
be enclosed in double quotes ("..."), with "\\" as escape character.
"""
if text is None:
ui.status(_("reading DAG from stdin\n"))
text = ui.fin.read()
cl = repo.changelog
if len(cl) > 0:
raise util.Abort(_('repository is not empty'))
# determine number of revs in DAG
total = 0
for type, data in dagparser.parsedag(text):
if type == 'n':
total += 1
if mergeable_file:
linesperrev = 2
# make a file with k lines per rev
initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
initialmergedlines.append("")
tags = []
lock = tr = None
try:
lock = repo.lock()
tr = repo.transaction("builddag")
at = -1
atbranch = 'default'
nodeids = []
id = 0
ui.progress(_('building'), id, unit=_('revisions'), total=total)
for type, data in dagparser.parsedag(text):
if type == 'n':
ui.note(('node %s\n' % str(data)))
id, ps = data
files = []
fctxs = {}
p2 = None
if mergeable_file:
fn = "mf"
p1 = repo[ps[0]]
if len(ps) > 1:
p2 = repo[ps[1]]
pa = p1.ancestor(p2)
base, local, other = [x[fn].data() for x in (pa, p1,
p2)]
m3 = simplemerge.Merge3Text(base, local, other)
ml = [l.strip() for l in m3.merge_lines()]
ml.append("")
elif at > 0:
ml = p1[fn].data().split("\n")
else:
ml = initialmergedlines
ml[id * linesperrev] += " r%i" % id
mergedtext = "\n".join(ml)
files.append(fn)
fctxs[fn] = context.memfilectx(fn, mergedtext)
if overwritten_file:
fn = "of"
files.append(fn)
fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
if new_file:
fn = "nf%i" % id
files.append(fn)
fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
if len(ps) > 1:
if not p2:
p2 = repo[ps[1]]
for fn in p2:
if fn.startswith("nf"):
files.append(fn)
fctxs[fn] = p2[fn]
def fctxfn(repo, cx, path):
return fctxs.get(path)
if len(ps) == 0 or ps[0] < 0:
pars = [None, None]
elif len(ps) == 1:
pars = [nodeids[ps[0]], None]
else:
pars = [nodeids[p] for p in ps]
cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
date=(id, 0),
user="debugbuilddag",
extra={'branch': atbranch})
nodeid = repo.commitctx(cx)
nodeids.append(nodeid)
at = id
elif type == 'l':
id, name = data
ui.note(('tag %s\n' % name))
tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
elif type == 'a':
ui.note(('branch %s\n' % data))
atbranch = data
ui.progress(_('building'), id, unit=_('revisions'), total=total)
tr.close()
if tags:
repo.opener.write("localtags", "".join(tags))
finally:
ui.progress(_('building'), None)
release(tr, lock)
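# Illustrative input (not part of the original module): a small DAG text
# exercising several of the elements documented in the docstring above:
#
#     hg debugbuilddag '+3 :anchor @stable +2 /anchor'
#
# builds three linear nodes, tags the third locally as "anchor", adds two
# more nodes on the named branch "stable", and finally merges the last of
# them with the "anchor" node.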
@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE'))
def debugbundle(ui, bundlepath, all=None, **opts):
"""lists the contents of a bundle"""
f = hg.openpath(ui, bundlepath)
try:
gen = changegroup.readbundle(f, bundlepath)
if all:
ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))
def showchunks(named):
ui.write("\n%s\n" % named)
chain = None
while True:
chunkdata = gen.deltachunk(chain)
if not chunkdata:
break
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
cs = chunkdata['cs']
deltabase = chunkdata['deltabase']
delta = chunkdata['delta']
ui.write("%s %s %s %s %s %s\n" %
(hex(node), hex(p1), hex(p2),
hex(cs), hex(deltabase), len(delta)))
chain = node
chunkdata = gen.changelogheader()
showchunks("changelog")
chunkdata = gen.manifestheader()
showchunks("manifest")
while True:
chunkdata = gen.filelogheader()
if not chunkdata:
break
fname = chunkdata['filename']
showchunks(fname)
else:
chunkdata = gen.changelogheader()
chain = None
while True:
chunkdata = gen.deltachunk(chain)
if not chunkdata:
break
node = chunkdata['node']
ui.write("%s\n" % hex(node))
chain = node
finally:
f.close()
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
"""validate the correctness of the current dirstate"""
parent1, parent2 = repo.dirstate.parents()
m1 = repo[parent1].manifest()
m2 = repo[parent2].manifest()
errors = 0
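    # dirstate states are single letters: 'n' = normal, 'a' = added,
    # 'r' = removed, 'm' = 2-parent merged; each entry is cross-checked
    # against the parent manifests below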
for f in repo.dirstate:
state = repo.dirstate[f]
if state in "nr" and f not in m1:
ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
errors += 1
if state in "a" and f in m1:
ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
errors += 1
if state in "m" and f not in m1 and f not in m2:
ui.warn(_("%s in state %s, but not in either manifest\n") %
(f, state))
errors += 1
for f in m1:
state = repo.dirstate[f]
if state not in "nrm":
ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
errors += 1
if errors:
error = _(".hg/dirstate inconsistent with current parent's manifest")
raise util.Abort(error)
@command('debugcommands', [], _('[COMMAND]'))
def debugcommands(ui, cmd='', *args):
"""list all available commands and options"""
for cmd, vals in sorted(table.iteritems()):
cmd = cmd.split('|')[0].strip('^')
opts = ', '.join([i[1] for i in vals[1]])
ui.write('%s: %s\n' % (cmd, opts))
@command('debugcomplete',
[('o', 'options', None, _('show the command options'))],
_('[-o] CMD'))
def debugcomplete(ui, cmd='', **opts):
"""returns the completion list associated with the given command"""
if opts.get('options'):
options = []
otables = [globalopts]
if cmd:
aliases, entry = cmdutil.findcmd(cmd, table, False)
otables.append(entry[1])
for t in otables:
for o in t:
if "(DEPRECATED)" in o[3]:
continue
if o[0]:
options.append('-%s' % o[0])
options.append('--%s' % o[1])
ui.write("%s\n" % "\n".join(options))
return
cmdlist = cmdutil.findpossible(cmd, table)
if ui.verbose:
cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
ui.write("%s\n" % "\n".join(sorted(cmdlist)))
@command('debugdag',
[('t', 'tags', None, _('use tags as labels')),
('b', 'branches', None, _('annotate with branch names')),
('', 'dots', None, _('use dots for runs')),
('s', 'spaces', None, _('separate elements by spaces'))],
_('[OPTION]... [FILE [REV]...]'))
def debugdag(ui, repo, file_=None, *revs, **opts):
"""format the changelog or an index DAG as a concise textual description
If you pass a revlog index, the revlog's DAG is emitted. If you list
revision numbers, they get labeled in the output as rN.
Otherwise, the changelog DAG of the current repo is emitted.
"""
spaces = opts.get('spaces')
dots = opts.get('dots')
if file_:
rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
revs = set((int(r) for r in revs))
def events():
for r in rlog:
yield 'n', (r, list(set(p for p in rlog.parentrevs(r)
if p != -1)))
if r in revs:
yield 'l', (r, "r%i" % r)
elif repo:
cl = repo.changelog
tags = opts.get('tags')
branches = opts.get('branches')
if tags:
labels = {}
for l, n in repo.tags().items():
labels.setdefault(cl.rev(n), []).append(l)
def events():
b = "default"
for r in cl:
if branches:
newb = cl.read(cl.node(r))[5]['branch']
if newb != b:
yield 'a', newb
b = newb
yield 'n', (r, list(set(p for p in cl.parentrevs(r)
if p != -1)))
if tags:
ls = labels.get(r)
if ls:
for l in ls:
yield 'l', (r, l)
else:
raise util.Abort(_('need repo for changelog dag'))
for line in dagparser.dagtextlines(events(),
addspaces=spaces,
wraplabels=True,
wrapannotations=True,
wrapnonlinear=dots,
usedots=dots,
maxlinewidth=70):
ui.write(line)
ui.write("\n")
@command('debugdata',
[('c', 'changelog', False, _('open changelog')),
('m', 'manifest', False, _('open manifest'))],
_('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
"""dump the contents of a data file revision"""
if opts.get('changelog') or opts.get('manifest'):
file_, rev = None, file_
elif rev is None:
raise error.CommandError('debugdata', _('invalid arguments'))
r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
try:
ui.write(r.revision(r.lookup(rev)))
except KeyError:
raise util.Abort(_('invalid revision identifier %s') % rev)
@command('debugdate',
[('e', 'extended', None, _('try extended date formats'))],
_('[-e] DATE [RANGE]'))
def debugdate(ui, date, range=None, **opts):
"""parse and display a date"""
if opts["extended"]:
d = util.parsedate(date, util.extendeddateformats)
else:
d = util.parsedate(date)
ui.write(("internal: %s %s\n") % d)
ui.write(("standard: %s\n") % util.datestr(d))
if range:
m = util.matchdate(range)
ui.write(("match: %s\n") % m(d[0]))
@command('debugdiscovery',
[('', 'old', None, _('use old-style discovery')),
('', 'nonheads', None,
_('use old-style discovery with non-heads included')),
] + remoteopts,
_('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
"""runs the changeset discovery protocol in isolation"""
remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
opts.get('branch'))
remote = hg.peer(repo, opts, remoteurl)
ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
# make sure tests are repeatable
random.seed(12323)
def doit(localheads, remoteheads, remote=remote):
if opts.get('old'):
if localheads:
raise util.Abort('cannot use localheads with old style '
'discovery')
if not util.safehasattr(remote, 'branches'):
# enable in-client legacy support
remote = localrepo.locallegacypeer(remote.local())
common, _in, hds = treediscovery.findcommonincoming(repo, remote,
force=True)
common = set(common)
if not opts.get('nonheads'):
ui.write(("unpruned common: %s\n") %
" ".join(sorted(short(n) for n in common)))
dag = dagutil.revlogdag(repo.changelog)
all = dag.ancestorset(dag.internalizeall(common))
common = dag.externalizeall(dag.headsetofconnecteds(all))
else:
common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
common = set(common)
rheads = set(hds)
lheads = set(repo.heads())
ui.write(("common heads: %s\n") %
" ".join(sorted(short(n) for n in common)))
if lheads <= common:
ui.write(("local is subset\n"))
elif rheads <= common:
ui.write(("remote is subset\n"))
serverlogs = opts.get('serverlog')
if serverlogs:
for filename in serverlogs:
logfile = open(filename, 'r')
try:
line = logfile.readline()
while line:
parts = line.strip().split(';')
op = parts[1]
if op == 'cg':
pass
elif op == 'cgss':
doit(parts[2].split(' '), parts[3].split(' '))
elif op == 'unb':
doit(parts[3].split(' '), parts[2].split(' '))
line = logfile.readline()
finally:
logfile.close()
else:
remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
opts.get('remote_head'))
localrevs = opts.get('local_head')
doit(localrevs, remoterevs)
@command('debugfileset',
[('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
_('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
'''parse and apply a fileset specification'''
ctx = scmutil.revsingle(repo, opts.get('rev'), None)
if ui.verbose:
tree = fileset.parse(expr)[0]
ui.note(tree, "\n")
for f in fileset.getfileset(ctx, expr):
ui.write("%s\n" % f)
@command('debugfsinfo', [], _('[PATH]'))
def debugfsinfo(ui, path = "."):
"""show information detected about current filesystem"""
util.writefile('.debugfsinfo', '')
ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo')
and 'yes' or 'no'))
os.unlink('.debugfsinfo')
@command('debuggetbundle',
[('H', 'head', [], _('id of head node'), _('ID')),
('C', 'common', [], _('id of common node'), _('ID')),
('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
_('REPO FILE [-H|-C ID]...'))
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
"""retrieves a bundle from a repo
Every ID must be a full-length hex node id string. Saves the bundle to the
given file.
"""
repo = hg.peer(ui, opts, repopath)
if not repo.capable('getbundle'):
raise util.Abort("getbundle() not supported by target repository")
args = {}
if common:
args['common'] = [bin(s) for s in common]
if head:
args['heads'] = [bin(s) for s in head]
bundle = repo.getbundle('debug', **args)
bundletype = opts.get('type', 'bzip2').lower()
btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
bundletype = btypes.get(bundletype)
if bundletype not in changegroup.bundletypes:
raise util.Abort(_('unknown bundle type specified with --type'))
changegroup.writebundle(bundle, bundlepath, bundletype)
@command('debugignore', [], '')
def debugignore(ui, repo, *values, **opts):
"""display the combined ignore pattern"""
ignore = repo.dirstate._ignore
includepat = getattr(ignore, 'includepat', None)
if includepat is not None:
ui.write("%s\n" % includepat)
else:
raise util.Abort(_("no ignore patterns found"))
@command('debugindex',
[('c', 'changelog', False, _('open changelog')),
('m', 'manifest', False, _('open manifest')),
('f', 'format', 0, _('revlog format'), _('FORMAT'))],
_('[-f FORMAT] -c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
"""dump the contents of an index file"""
r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
format = opts.get('format', 0)
if format not in (0, 1):
raise util.Abort(_("unknown format %d") % format)
generaldelta = r.version & revlog.REVLOGGENERALDELTA
if generaldelta:
basehdr = ' delta'
else:
basehdr = ' base'
if format == 0:
ui.write(" rev offset length " + basehdr + " linkrev"
" nodeid p1 p2\n")
elif format == 1:
ui.write(" rev flag offset length"
" size " + basehdr + " link p1 p2"
" nodeid\n")
for i in r:
node = r.node(i)
if generaldelta:
base = r.deltaparent(i)
else:
base = r.chainbase(i)
if format == 0:
try:
pp = r.parents(node)
except Exception:
pp = [nullid, nullid]
ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
i, r.start(i), r.length(i), base, r.linkrev(i),
short(node), short(pp[0]), short(pp[1])))
elif format == 1:
pr = r.parentrevs(i)
ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
base, r.linkrev(i), pr[0], pr[1], short(node)))
@command('debugindexdot', [], _('FILE'))
def debugindexdot(ui, repo, file_):
"""dump an index DAG as a graphviz dot file"""
r = None
if repo:
filelog = repo.file(file_)
if len(filelog):
r = filelog
if not r:
r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
ui.write(("digraph G {\n"))
for i in r:
node = r.node(i)
pp = r.parents(node)
ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
if pp[1] != nullid:
ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write("}\n")
@command('debuginstall', [], '')
def debuginstall(ui):
'''test Mercurial installation
Returns 0 on success.
'''
def writetemp(contents):
(fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
f = os.fdopen(fd, "wb")
f.write(contents)
f.close()
return name
problems = 0
# encoding
ui.status(_("checking encoding (%s)...\n") % encoding.encoding)
try:
encoding.fromlocal("test")
except util.Abort, inst:
ui.write(" %s\n" % inst)
ui.write(_(" (check that your locale is properly set)\n"))
problems += 1
# Python lib
ui.status(_("checking Python lib (%s)...\n")
% os.path.dirname(os.__file__))
# compiled modules
ui.status(_("checking installed modules (%s)...\n")
% os.path.dirname(__file__))
try:
import bdiff, mpatch, base85, osutil
dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
except Exception, inst:
ui.write(" %s\n" % inst)
ui.write(_(" One or more extensions could not be found"))
ui.write(_(" (check that you compiled the extensions)\n"))
problems += 1
# templates
import templater
p = templater.templatepath()
ui.status(_("checking templates (%s)...\n") % ' '.join(p))
try:
templater.templater(templater.templatepath("map-cmdline.default"))
except Exception, inst:
ui.write(" %s\n" % inst)
ui.write(_(" (templates seem to have been installed incorrectly)\n"))
problems += 1
# editor
ui.status(_("checking commit editor...\n"))
editor = ui.geteditor()
cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
if not cmdpath:
if editor == 'vi':
ui.write(_(" No commit editor set and can't find vi in PATH\n"))
ui.write(_(" (specify a commit editor in your configuration"
" file)\n"))
else:
ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
ui.write(_(" (specify a commit editor in your configuration"
" file)\n"))
problems += 1
# check username
ui.status(_("checking username...\n"))
try:
ui.username()
except util.Abort, e:
ui.write(" %s\n" % e)
ui.write(_(" (specify a username in your configuration file)\n"))
problems += 1
if not problems:
ui.status(_("no problems detected\n"))
else:
ui.write(_("%s problems detected,"
" please check your install!\n") % problems)
return problems
@command('debugknown', [], _('REPO ID...'))
def debugknown(ui, repopath, *ids, **opts):
"""test whether node ids are known to a repo
Every ID must be a full-length hex node id string. Returns a list of 0s
and 1s indicating unknown/known.
"""
repo = hg.peer(ui, opts, repopath)
if not repo.capable('known'):
raise util.Abort("known() not supported by target repository")
flags = repo.known([bin(s) for s in ids])
ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
'''complete "labels" - tags, open branch names, bookmark names'''
labels = set()
labels.update(t[0] for t in repo.tagslist())
labels.update(repo._bookmarks.keys())
for heads in repo.branchmap().itervalues():
for h in heads:
ctx = repo[h]
if not ctx.closesbranch():
labels.add(ctx.branch())
completions = set()
if not args:
args = ['']
for a in args:
completions.update(l for l in labels if l.startswith(a))
ui.write('\n'.join(sorted(completions)))
ui.write('\n')
@command('debugobsolete',
[('', 'flags', 0, _('markers flag')),
] + commitopts2,
_('[OBSOLETED [REPLACEMENT] [REPL... ]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
"""create arbitrary obsolete marker
With no arguments, displays the list of obsolescence markers."""
def parsenodeid(s):
try:
# We do not use revsingle/revrange functions here to accept
# arbitrary node identifiers, possibly not present in the
# local repository.
n = bin(s)
if len(n) != len(nullid):
raise TypeError()
return n
except TypeError:
raise util.Abort('changeset references must be full hexadecimal '
'node identifiers')
if precursor is not None:
metadata = {}
if 'date' in opts:
metadata['date'] = opts['date']
metadata['user'] = opts['user'] or ui.username()
succs = tuple(parsenodeid(succ) for succ in successors)
l = repo.lock()
try:
tr = repo.transaction('debugobsolete')
try:
repo.obsstore.create(tr, parsenodeid(precursor), succs,
opts['flags'], metadata)
tr.close()
finally:
tr.release()
finally:
l.release()
else:
for m in obsolete.allmarkers(repo):
ui.write(hex(m.precnode()))
for repl in m.succnodes():
ui.write(' ')
ui.write(hex(repl))
ui.write(' %X ' % m._data[2])
ui.write('{%s}' % (', '.join('%r: %r' % t for t in
sorted(m.metadata().items()))))
ui.write('\n')
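# Standalone sketch of the node-id validation performed by parsenodeid
# above; 'isfullhexnode' is a hypothetical helper for illustration only.
import binascii
def isfullhexnode(s):
    """Return True if s is a full 40-digit hexadecimal node identifier."""
    if len(s) != 40:
        return False
    try:
        binascii.unhexlify(s)
        return True
    except (TypeError, ValueError):
        # Python 2 raises TypeError for non-hex input; Python 3 raises
        # binascii.Error, a ValueError subclass
        return False
# isfullhexnode('0' * 40) -> True; isfullhexnode('not-a-node') -> False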
@command('debugpathcomplete',
[('f', 'full', None, _('complete an entire path')),
('n', 'normal', None, _('show only normal files')),
('a', 'added', None, _('show only added files')),
('r', 'removed', None, _('show only removed files'))],
_('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
'''complete part or all of a tracked path
This command supports shells that offer path name completion. It
currently completes only files already known to the dirstate.
Completion extends only to the next path segment unless
--full is specified, in which case entire paths are used.'''
def complete(path, acceptable):
dirstate = repo.dirstate
spec = os.path.normpath(os.path.join(os.getcwd(), path))
rootdir = repo.root + os.sep
if spec != repo.root and not spec.startswith(rootdir):
return [], []
if os.path.isdir(spec):
spec += '/'
spec = spec[len(rootdir):]
fixpaths = os.sep != '/'
if fixpaths:
spec = spec.replace(os.sep, '/')
speclen = len(spec)
fullpaths = opts['full']
files, dirs = set(), set()
adddir, addfile = dirs.add, files.add
for f, st in dirstate.iteritems():
if f.startswith(spec) and st[0] in acceptable:
if fixpaths:
f = f.replace('/', os.sep)
if fullpaths:
addfile(f)
continue
s = f.find(os.sep, speclen)
if s >= 0:
adddir(f[:s + 1])
else:
addfile(f)
return files, dirs
acceptable = ''
if opts['normal']:
acceptable += 'nm'
if opts['added']:
acceptable += 'a'
if opts['removed']:
acceptable += 'r'
cwd = repo.getcwd()
if not specs:
specs = ['.']
files, dirs = set(), set()
for spec in specs:
f, d = complete(spec, acceptable or 'nmar')
files.update(f)
dirs.update(d)
if not files and len(dirs) == 1:
# force the shell to consider a completion that matches one
# directory and zero files to be ambiguous
dirs.add(iter(dirs).next() + '.')
files.update(dirs)
ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
ui.write('\n')
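# A self-contained sketch of the "next path segment" rule implemented by
# complete() above, applied to a plain list of paths instead of a dirstate;
# 'segmentcomplete' is a hypothetical name used only for illustration.
def segmentcomplete(paths, spec):
    """Complete spec against paths, stopping at the next '/' boundary."""
    files, dirs = set(), set()
    speclen = len(spec)
    for f in paths:
        if not f.startswith(spec):
            continue
        s = f.find('/', speclen)
        if s >= 0:
            dirs.add(f[:s + 1])  # cut the match at the segment boundary
        else:
            files.add(f)
    return files, dirs
# segmentcomplete(['a/b/c', 'a/b/d', 'ax'], 'a') -> ({'ax'}, {'a/'})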
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
'''access the pushkey key/value protocol
With two args, list the keys in the given namespace.
With five args, set a key to new if it currently is set to old.
Reports success or failure.
'''
target = hg.peer(ui, {}, repopath)
if keyinfo:
key, old, new = keyinfo
r = target.pushkey(namespace, key, old, new)
ui.status(str(r) + '\n')
return not r
else:
for k, v in sorted(target.listkeys(namespace).iteritems()):
ui.write("%s\t%s\n" % (k.encode('string-escape'),
v.encode('string-escape')))
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
ca = scmutil.revsingle(repo, a)
cb = scmutil.revsingle(repo, b)
pa = pvec.ctxpvec(ca)
pb = pvec.ctxpvec(cb)
if pa == pb:
rel = "="
elif pa > pb:
rel = ">"
elif pa < pb:
rel = "<"
elif pa | pb:
rel = "|"
ui.write(_("a: %s\n") % pa)
ui.write(_("b: %s\n") % pb)
ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
(abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
pa.distance(pb), rel))
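# Hedged sketch of a Hamming distance over equal-length byte vectors, the
# kind of measure reported as 'hdist' above; this standalone version is an
# assumption for illustration, not the pvec._hamming implementation.
def hammingdistance(a, b):
    """Count differing bits between two equal-length byte strings."""
    assert len(a) == len(b)
    d = 0
    for x, y in zip(bytearray(a), bytearray(b)):
        v = x ^ y
        while v:
            v &= v - 1  # clear the lowest set bit
            d += 1
    return d
# hammingdistance(b'\x00', b'\x03') -> 2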
@command('debugrebuilddirstate|debugrebuildstate',
[('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
_('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev):
"""rebuild the dirstate as it would look like for the given revision
If no revision is specified, the first parent of the working directory
is used.
The dirstate will be set to the files of the given revision.
The actual working directory content or existing dirstate
information such as adds or removes is not considered.
One use of this command is to make the next :hg:`status` invocation
check the actual file content.
"""
ctx = scmutil.revsingle(repo, rev)
wlock = repo.wlock()
try:
repo.dirstate.rebuild(ctx.node(), ctx.manifest())
finally:
wlock.release()
@command('debugrename',
[('r', 'rev', '', _('revision to debug'), _('REV'))],
_('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
"""dump rename information"""
ctx = scmutil.revsingle(repo, opts.get('rev'))
m = scmutil.match(ctx, (file1,) + pats, opts)
for abs in ctx.walk(m):
fctx = ctx[abs]
o = fctx.filelog().renamed(fctx.filenode())
rel = m.rel(abs)
if o:
ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
else:
ui.write(_("%s not renamed\n") % rel)
@command('debugrevlog',
[('c', 'changelog', False, _('open changelog')),
('m', 'manifest', False, _('open manifest')),
('d', 'dump', False, _('dump index data'))],
_('-c|-m|FILE'))
def debugrevlog(ui, repo, file_=None, **opts):
"""show data and statistics about a revlog"""
r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
if opts.get("dump"):
numrevs = len(r)
ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
" rawsize totalsize compression heads\n")
ts = 0
heads = set()
for rev in xrange(numrevs):
dbase = r.deltaparent(rev)
if dbase == -1:
dbase = rev
cbase = r.chainbase(rev)
p1, p2 = r.parentrevs(rev)
rs = r.rawsize(rev)
ts = ts + rs
heads -= set(r.parentrevs(rev))
heads.add(rev)
ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
(rev, p1, p2, r.start(rev), r.end(rev),
r.start(dbase), r.start(cbase),
r.start(p1), r.start(p2),
rs, ts, ts / r.end(rev), len(heads)))
return 0
v = r.version
format = v & 0xFFFF
flags = []
gdelta = False
if v & revlog.REVLOGNGINLINEDATA:
flags.append('inline')
if v & revlog.REVLOGGENERALDELTA:
gdelta = True
flags.append('generaldelta')
if not flags:
flags = ['(none)']
nummerges = 0
numfull = 0
numprev = 0
nump1 = 0
nump2 = 0
numother = 0
nump1prev = 0
nump2prev = 0
chainlengths = []
datasize = [None, 0, 0L]
fullsize = [None, 0, 0L]
deltasize = [None, 0, 0L]
def addsize(size, l):
if l[0] is None or size < l[0]:
l[0] = size
if size > l[1]:
l[1] = size
l[2] += size
numrevs = len(r)
for rev in xrange(numrevs):
p1, p2 = r.parentrevs(rev)
delta = r.deltaparent(rev)
if format > 0:
addsize(r.rawsize(rev), datasize)
if p2 != nullrev:
nummerges += 1
size = r.length(rev)
if delta == nullrev:
chainlengths.append(0)
numfull += 1
addsize(size, fullsize)
else:
chainlengths.append(chainlengths[delta] + 1)
addsize(size, deltasize)
if delta == rev - 1:
numprev += 1
if delta == p1:
nump1prev += 1
elif delta == p2:
nump2prev += 1
elif delta == p1:
nump1 += 1
elif delta == p2:
nump2 += 1
elif delta != nullrev:
numother += 1
# Adjust size min value for empty cases
for size in (datasize, fullsize, deltasize):
if size[0] is None:
size[0] = 0
numdeltas = numrevs - numfull
numoprev = numprev - nump1prev - nump2prev
totalrawsize = datasize[2]
datasize[2] /= numrevs
fulltotal = fullsize[2]
fullsize[2] /= numfull
deltatotal = deltasize[2]
if numrevs - numfull > 0:
deltasize[2] /= numrevs - numfull
totalsize = fulltotal + deltatotal
avgchainlen = sum(chainlengths) / numrevs
compratio = totalrawsize / totalsize
basedfmtstr = '%%%dd\n'
basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
def dfmtstr(max):
return basedfmtstr % len(str(max))
def pcfmtstr(max, padding=0):
return basepcfmtstr % (len(str(max)), ' ' * padding)
def pcfmt(value, total):
return (value, 100 * float(value) / total)
ui.write(('format : %d\n') % format)
ui.write(('flags : %s\n') % ', '.join(flags))
ui.write('\n')
fmt = pcfmtstr(totalsize)
fmt2 = dfmtstr(totalsize)
ui.write(('revisions : ') + fmt2 % numrevs)
ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
ui.write(('revisions : ') + fmt2 % numrevs)
ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
ui.write(('revision size : ') + fmt2 % totalsize)
ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
ui.write('\n')
fmt = dfmtstr(max(avgchainlen, compratio))
ui.write(('avg chain length : ') + fmt % avgchainlen)
ui.write(('compression ratio : ') + fmt % compratio)
if format > 0:
ui.write('\n')
ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
% tuple(datasize))
ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
% tuple(fullsize))
ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
% tuple(deltasize))
if numdeltas > 0:
ui.write('\n')
fmt = pcfmtstr(numdeltas)
fmt2 = pcfmtstr(numdeltas, 4)
ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
if numprev > 0:
ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
numprev))
ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
numprev))
ui.write((' other : ') + fmt2 % pcfmt(numoprev,
numprev))
if gdelta:
ui.write(('deltas against p1 : ')
+ fmt % pcfmt(nump1, numdeltas))
ui.write(('deltas against p2 : ')
+ fmt % pcfmt(nump2, numdeltas))
ui.write(('deltas against other : ') + fmt % pcfmt(numother,
numdeltas))
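# Standalone sketch of the running min/max/total pattern that addsize()
# uses inside debugrevlog above; 'sizestats' is a hypothetical helper.
def sizestats(sizes):
    """Return (min, max, avg) for a non-empty sequence of sizes."""
    lo, hi, total = None, 0, 0
    for size in sizes:
        if lo is None or size < lo:
            lo = size
        if size > hi:
            hi = size
        total += size
    return lo, hi, total // len(sizes)
# sizestats([3, 10, 5]) -> (3, 10, 6)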
@command('debugrevspec', [], _('REVSPEC'))
def debugrevspec(ui, repo, expr):
"""parse and apply a revision specification
Use --verbose to print the parsed tree before and after alias
expansion.
"""
if ui.verbose:
tree = revset.parse(expr)[0]
ui.note(revset.prettyformat(tree), "\n")
newtree = revset.findaliases(ui, tree)
if newtree != tree:
ui.note(revset.prettyformat(newtree), "\n")
func = revset.match(ui, expr)
for c in func(repo, range(len(repo))):
ui.write("%s\n" % c)
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
"""manually set the parents of the current working directory
This is useful for writing repository conversion tools, but should
be used with care.
Returns 0 on success.
"""
r1 = scmutil.revsingle(repo, rev1).node()
r2 = scmutil.revsingle(repo, rev2, 'null').node()
wlock = repo.wlock()
try:
repo.setparents(r1, r2)
finally:
wlock.release()
@command('debugdirstate|debugstate',
[('', 'nodates', None, _('do not display the saved mtime')),
('', 'datesort', None, _('sort by saved mtime'))],
_('[OPTION]...'))
def debugstate(ui, repo, nodates=None, datesort=None):
"""show the contents of the current dirstate"""
timestr = ""
showdate = not nodates
if datesort:
keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
else:
keyfunc = None # sort by filename
for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
if showdate:
if ent[3] == -1:
# Pad or slice to locale representation
locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
time.localtime(0)))
timestr = 'unset'
timestr = (timestr[:locale_len] +
' ' * (locale_len - len(timestr)))
else:
timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
time.localtime(ent[3]))
if ent[1] & 020000:
mode = 'lnk'
else:
mode = '%3o' % (ent[1] & 0777 & ~util.umask)
ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
for f in repo.dirstate.copies():
ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
@command('debugsub',
[('r', 'rev', '',
_('revision to check'), _('REV'))],
_('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
ctx = scmutil.revsingle(repo, rev, None)
for k, v in sorted(ctx.substate.items()):
ui.write(('path %s\n') % k)
ui.write((' source %s\n') % v[0])
ui.write((' revision %s\n') % v[1])
@command('debugsuccessorssets',
[],
_('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
"""show set of successors for revision
A successors set of changeset A is a consistent group of revisions that
succeed A. It contains non-obsolete changesets only.
In most cases a changeset A has a single successors set containing a single
successor (changeset A replaced by A').
A changeset that is made obsolete with no successors is called "pruned".
Such changesets have no successors sets at all.
A changeset that has been "split" will have a successors set containing
more than one successor.
A changeset that has been rewritten in multiple different ways is called
"divergent". Such changesets have multiple successor sets (each of which
may also be split, i.e. have multiple successors).
Results are displayed as follows::

    <rev1>
        <successors-1A>
    <rev2>
        <successors-2A>
        <successors-2B1> <successors-2B2> <successors-2B3>
Here rev2 has two possible (i.e. divergent) successors sets. The first
holds one element, whereas the second holds three (i.e. the changeset has
been split).
"""
# passed to successorssets caching computation from one call to another
cache = {}
ctx2str = str
node2str = short
if ui.debug():
def ctx2str(ctx):
return ctx.hex()
node2str = hex
for rev in scmutil.revrange(repo, revs):
ctx = repo[rev]
ui.write('%s\n' % ctx2str(ctx))
for succsset in obsolete.successorssets(repo, ctx.node(), cache):
if succsset:
ui.write(' ')
ui.write(node2str(succsset[0]))
for node in succsset[1:]:
ui.write(' ')
ui.write(node2str(node))
ui.write('\n')
@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
def debugwalk(ui, repo, *pats, **opts):
"""show how files match on given patterns"""
m = scmutil.match(repo[None], pats, opts)
items = list(repo.walk(m))
if not items:
return
f = lambda fn: fn
if ui.configbool('ui', 'slash') and os.sep != '/':
f = lambda fn: util.normpath(fn)
fmt = 'f %%-%ds %%-%ds %%s' % (
max([len(abs) for abs in items]),
max([len(m.rel(abs)) for abs in items]))
for abs in items:
line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
ui.write("%s\n" % line.rstrip())
@command('debugwireargs',
[('', 'three', '', 'three'),
('', 'four', '', 'four'),
('', 'five', '', 'five'),
] + remoteopts,
_('REPO [OPTIONS]... [ONE [TWO]]'))
def debugwireargs(ui, repopath, *vals, **opts):
repo = hg.peer(ui, opts, repopath)
for opt in remoteopts:
del opts[opt[1]]
args = {}
for k, v in opts.iteritems():
if v:
args[k] = v
# run twice to check that we don't mess up the stream for the next command
res1 = repo.debugwireargs(*vals, **args)
res2 = repo.debugwireargs(*vals, **args)
ui.write("%s\n" % res1)
if res1 != res2:
ui.warn("%s\n" % res2)
@command('^diff',
[('r', 'rev', [], _('revision'), _('REV')),
('c', 'change', '', _('change made by revision'), _('REV'))
] + diffopts + diffopts2 + walkopts + subrepoopts,
_('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'))
def diff(ui, repo, *pats, **opts):
"""diff repository (or selected files)
Show differences between revisions for the specified files.
Differences between files are shown using the unified diff format.
.. note::
diff may generate unexpected results for merges, as it will
default to comparing against the working directory's first
parent changeset if no revisions are specified.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.
Alternatively you can specify -c/--change with a revision to see
the changes in that changeset relative to its first parent.
Without the -a/--text option, diff will avoid generating diffs of
files it detects as binary. With -a, diff will generate a diff
anyway, probably with undesirable results.
Use the -g/--git option to generate diffs in the git extended diff
format. For more information, read :hg:`help diffs`.
.. container:: verbose
Examples:
- compare a file in the current working directory to its parent::
hg diff foo.c
- compare two historical versions of a directory, with rename info::
hg diff --git -r 1.0:1.2 lib/
- get change stats relative to the last change on some date::
hg diff --stat -r "date('may 2')"
- diff all newly-added files that contain a keyword::
hg diff "set:added() and grep(GNU)"
- compare a revision and its parents::
hg diff -c 9353 # compare against first parent
hg diff -r 9353^:9353 # same using revset syntax
hg diff -r 9353^2:9353 # compare against the second parent
Returns 0 on success.
"""
revs = opts.get('rev')
change = opts.get('change')
stat = opts.get('stat')
reverse = opts.get('reverse')
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1 = repo[node2].p1().node()
else:
node1, node2 = scmutil.revpair(repo, revs)
if reverse:
node1, node2 = node2, node1
diffopts = patch.diffopts(ui, opts)
m = scmutil.match(repo[node2], pats, opts)
cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
listsubrepos=opts.get('subrepos'))
@command('^export',
[('o', 'output', '',
_('print output to file with formatted name'), _('FORMAT')),
('', 'switch-parent', None, _('diff against the second parent')),
('r', 'rev', [], _('revisions to export'), _('REV')),
] + diffopts,
_('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'))
def export(ui, repo, *changesets, **opts):
"""dump the header and diffs for one or more changesets
Print the changeset header and diffs for one or more revisions.
If no revision is given, the parent of the working directory is used.
The information shown in the changeset header is: author, date,
branch name (if non-default), changeset hash, parent(s) and commit
comment.
.. note::
export may generate unexpected diff output for merge
changesets, as it will compare the merge changeset against its
first parent only.
Output may be to a file, in which case the name of the file is
given using a format string. The formatting rules are as follows:
:``%%``: literal "%" character
:``%H``: changeset hash (40 hexadecimal digits)
:``%N``: number of patches being generated
:``%R``: changeset revision number
:``%b``: basename of the exporting repository
:``%h``: short-form changeset hash (12 hexadecimal digits)
:``%m``: first line of the commit message (only alphanumeric characters)
:``%n``: zero-padded sequence number, starting at 1
:``%r``: zero-padded changeset revision number
Without the -a/--text option, export will avoid generating diffs
of files it detects as binary. With -a, export will generate a
diff anyway, probably with undesirable results.
Use the -g/--git option to generate diffs in the git extended diff
format. See :hg:`help diffs` for more information.
With the --switch-parent option, the diff will be against the
second parent. It can be useful to review a merge.
.. container:: verbose
Examples:
- use export and import to transplant a bugfix to the current
branch::
hg export -r 9353 | hg import -
- export all the changesets between two revisions to a file with
rename information::
hg export --git -r 123:150 > changes.txt
- split outgoing changes into a series of patches with
descriptive names::
hg export -r "outgoing()" -o "%n-%m.patch"
Returns 0 on success.
"""
changesets += tuple(opts.get('rev', []))
if not changesets:
changesets = ['.']
revs = scmutil.revrange(repo, changesets)
if not revs:
raise util.Abort(_("export requires at least one changeset"))
if len(revs) > 1:
ui.note(_('exporting patches:\n'))
else:
ui.note(_('exporting patch:\n'))
cmdutil.export(repo, revs, template=opts.get('output'),
switch_parent=opts.get('switch_parent'),
opts=patch.diffopts(ui, opts))
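# Illustrative sketch (an assumption, not cmdutil's implementation) of how
# a few of the export -o FORMAT specifiers documented above could be
# expanded; padding of %n is simplified to a fixed width of two digits.
def expandoutputname(fmt, seqno, total, shorthash):
    """Expand %%, %n, %N and %h in an export-style output pattern."""
    out, i = [], 0
    while i < len(fmt):
        c = fmt[i]
        if c == '%' and i + 1 < len(fmt):
            spec = fmt[i + 1]
            if spec == '%':
                out.append('%')
            elif spec == 'n':
                out.append('%02d' % seqno)  # zero-padded sequence number
            elif spec == 'N':
                out.append(str(total))
            elif spec == 'h':
                out.append(shorthash)
            else:
                out.append(c + spec)  # leave unknown specifiers untouched
            i += 2
        else:
            out.append(c)
            i += 1
    return ''.join(out)
# expandoutputname('%n-of-%N-%h.patch', 1, 3, '5ca8c111e9aa')
#   -> '01-of-3-5ca8c111e9aa.patch'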
@command('^forget', walkopts, _('[OPTION]... FILE...'))
def forget(ui, repo, *pats, **opts):
"""forget the specified files on the next commit
Mark the specified files so they will no longer be tracked
after the next commit.
This only removes files from the current branch, not from the
entire project history, and it does not delete them from the
working directory.
To undo a forget before the next commit, see :hg:`add`.
.. container:: verbose
Examples:
- forget newly-added binary files::
hg forget "set:added() and binary()"
- forget files that would be excluded by .hgignore::
hg forget "set:hgignore()"
Returns 0 on success.
"""
if not pats:
raise util.Abort(_('no files specified'))
m = scmutil.match(repo[None], pats, opts)
rejected = cmdutil.forget(ui, repo, m, prefix="", explicitonly=False)[0]
return rejected and 1 or 0
@command(
'graft',
[('r', 'rev', [], _('revisions to graft'), _('REV')),
('c', 'continue', False, _('resume interrupted graft')),
('e', 'edit', False, _('invoke editor on commit messages')),
('', 'log', None, _('append graft info to log message')),
('D', 'currentdate', False,
_('record the current date as commit date')),
('U', 'currentuser', False,
_('record the current user as committer'))]
+ commitopts2 + mergetoolopts + dryrunopts,
_('[OPTION]... [-r] REV...'))
def graft(ui, repo, *revs, **opts):
'''copy changes from other branches onto the current branch
This command uses Mercurial's merge logic to copy individual
changes from other branches without merging branches in the
history graph. This is sometimes known as 'backporting' or
'cherry-picking'. By default, graft will copy user, date, and
description from the source changesets.
Changesets that are ancestors of the current revision, that have
already been grafted, or that are merges will be skipped.
If --log is specified, log messages will have a comment appended
of the form::
(grafted from CHANGESETHASH)
If a graft merge results in conflicts, the graft process is
interrupted so that the current merge can be manually resolved.
Once all conflicts are addressed, the graft process can be
continued with the -c/--continue option.
.. note::
The -c/--continue option does not reapply earlier options.
.. container:: verbose
Examples:
- copy a single change to the stable branch and edit its description::
hg update stable
hg graft --edit 9393
- graft a range of changesets with one exception, updating dates::
hg graft -D "2085::2093 and not 2091"
- continue a graft after resolving conflicts::
hg graft -c
- show the source of a grafted changeset::
hg log --debug -r tip
Returns 0 on successful completion.
'''
revs = list(revs)
revs.extend(opts['rev'])
if not opts.get('user') and opts.get('currentuser'):
opts['user'] = ui.username()
if not opts.get('date') and opts.get('currentdate'):
opts['date'] = "%d %d" % util.makedate()
editor = None
if opts.get('edit'):
editor = cmdutil.commitforceeditor
cont = False
if opts['continue']:
cont = True
if revs:
raise util.Abort(_("can't specify --continue and revisions"))
# read in unfinished revisions
try:
nodes = repo.opener.read('graftstate').splitlines()
revs = [repo[node].rev() for node in nodes]
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
raise util.Abort(_("no graft state found, can't continue"))
else:
cmdutil.bailifchanged(repo)
if not revs:
raise util.Abort(_('no revisions specified'))
revs = scmutil.revrange(repo, revs)
# check for merges
for rev in repo.revs('%ld and merge()', revs):
ui.warn(_('skipping ungraftable merge revision %s\n') % rev)
revs.remove(rev)
if not revs:
return -1
# check for ancestors of dest branch
crev = repo['.'].rev()
ancestors = repo.changelog.ancestors([crev], inclusive=True)
# don't mutate while iterating, create a copy
for rev in list(revs):
if rev in ancestors:
ui.warn(_('skipping ancestor revision %s\n') % rev)
revs.remove(rev)
if not revs:
return -1
# analyze revs for earlier grafts
ids = {}
for ctx in repo.set("%ld", revs):
ids[ctx.hex()] = ctx.rev()
n = ctx.extra().get('source')
if n:
ids[n] = ctx.rev()
# check ancestors for earlier grafts
ui.debug('scanning for duplicate grafts\n')
for rev in repo.changelog.findmissingrevs(revs, [crev]):
ctx = repo[rev]
n = ctx.extra().get('source')
if n in ids:
r = repo[n].rev()
if r in revs:
ui.warn(_('skipping already grafted revision %s\n') % r)
revs.remove(r)
elif ids[n] in revs:
ui.warn(_('skipping already grafted revision %s '
'(same origin %d)\n') % (ids[n], r))
revs.remove(ids[n])
elif ctx.hex() in ids:
r = ids[ctx.hex()]
ui.warn(_('skipping already grafted revision %s '
'(was grafted from %d)\n') % (r, rev))
revs.remove(r)
if not revs:
return -1
wlock = repo.wlock()
try:
current = repo['.']
for pos, ctx in enumerate(repo.set("%ld", revs)):
ui.status(_('grafting revision %s\n') % ctx.rev())
if opts.get('dry_run'):
continue
source = ctx.extra().get('source')
if not source:
source = ctx.hex()
extra = {'source': source}
user = ctx.user()
if opts.get('user'):
user = opts['user']
date = ctx.date()
if opts.get('date'):
date = opts['date']
message = ctx.description()
if opts.get('log'):
message += '\n(grafted from %s)' % ctx.hex()
# we don't merge the first commit when continuing
if not cont:
# perform the graft merge with p1(rev) as 'ancestor'
try:
# ui.forcemerge is an internal variable, do not document
repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
stats = mergemod.update(repo, ctx.node(), True, True, False,
ctx.p1().node())
finally:
repo.ui.setconfig('ui', 'forcemerge', '')
# report any conflicts
if stats and stats[3] > 0:
# write out state for --continue
nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
repo.opener.write('graftstate', ''.join(nodelines))
raise util.Abort(
_("unresolved conflicts, can't continue"),
hint=_('use hg resolve and hg graft --continue'))
else:
cont = False
# drop the second merge parent
repo.setparents(current.node(), nullid)
repo.dirstate.write()
# fix up dirstate for copies and renames
cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
# commit
node = repo.commit(text=message, user=user,
date=date, extra=extra, editor=editor)
if node is None:
ui.status(_('graft for revision %s is empty\n') % ctx.rev())
else:
current = repo[node]
finally:
wlock.release()
# remove state when we complete successfully
if not opts.get('dry_run'):
util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
return 0
@command('grep',
[('0', 'print0', None, _('end fields with NUL')),
('', 'all', None, _('print all revisions that match')),
('a', 'text', None, _('treat all files as text')),
('f', 'follow', None,
_('follow changeset history,'
' or file history across copies and renames')),
('i', 'ignore-case', None, _('ignore case when matching')),
('l', 'files-with-matches', None,
_('print only filenames and revisions that match')),
('n', 'line-number', None, _('print matching line numbers')),
('r', 'rev', [],
_('only search files changed within revision range'), _('REV')),
('u', 'user', None, _('list the author (long with -v)')),
('d', 'date', None, _('list the date (short with -q)')),
] + walkopts,
_('[OPTION]... PATTERN [FILE]...'))
def grep(ui, repo, pattern, *pats, **opts):
"""search for a pattern in specified files and revisions
Search revisions of files for a regular expression.
This command behaves differently than Unix grep. It only accepts
Python/Perl regexps. It searches repository history, not the
working directory. It always prints the revision number in which a
match appears.
By default, grep only prints output for the first revision of a
file in which it finds a match. To get it to print every revision
that contains a change in match status ("-" for a match that
becomes a non-match, or "+" for a non-match that becomes a match),
use the --all flag.
Returns 0 if a match is found, 1 otherwise.
"""
reflags = re.M
if opts.get('ignore_case'):
reflags |= re.I
try:
regexp = util.compilere(pattern, reflags)
except re.error, inst:
ui.warn(_("grep: invalid match pattern: %s\n") % inst)
return 1
sep, eol = ':', '\n'
if opts.get('print0'):
sep = eol = '\0'
getfile = util.lrucachefunc(repo.file)
def matchlines(body):
begin = 0
linenum = 0
while begin < len(body):
match = regexp.search(body, begin)
if not match:
break
mstart, mend = match.span()
linenum += body.count('\n', begin, mstart) + 1
lstart = body.rfind('\n', begin, mstart) + 1 or begin
begin = body.find('\n', mend) + 1 or len(body) + 1
lend = begin - 1
yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
class linestate(object):
def __init__(self, line, linenum, colstart, colend):
self.line = line
self.linenum = linenum
self.colstart = colstart
self.colend = colend
def __hash__(self):
return hash((self.linenum, self.line))
def __eq__(self, other):
return self.line == other.line
matches = {}
copies = {}
def grepbody(fn, rev, body):
matches[rev].setdefault(fn, [])
m = matches[rev][fn]
for lnum, cstart, cend, line in matchlines(body):
s = linestate(line, lnum, cstart, cend)
m.append(s)
def difflinestates(a, b):
sm = difflib.SequenceMatcher(None, a, b)
for tag, alo, ahi, blo, bhi in sm.get_opcodes():
if tag == 'insert':
for i in xrange(blo, bhi):
yield ('+', b[i])
elif tag == 'delete':
for i in xrange(alo, ahi):
yield ('-', a[i])
elif tag == 'replace':
for i in xrange(alo, ahi):
yield ('-', a[i])
for i in xrange(blo, bhi):
yield ('+', b[i])
def display(fn, ctx, pstates, states):
rev = ctx.rev()
datefunc = ui.quiet and util.shortdate or util.datestr
found = False
filerevmatches = {}
def binary():
flog = getfile(fn)
return util.binary(flog.read(ctx.filenode(fn)))
if opts.get('all'):
iter = difflinestates(pstates, states)
else:
iter = [('', l) for l in states]
for change, l in iter:
cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]
before, match, after = None, None, None
if opts.get('line_number'):
cols.append((str(l.linenum), 'grep.linenumber'))
if opts.get('all'):
cols.append((change, 'grep.change'))
if opts.get('user'):
cols.append((ui.shortuser(ctx.user()), 'grep.user'))
if opts.get('date'):
cols.append((datefunc(ctx.date()), 'grep.date'))
if opts.get('files_with_matches'):
c = (fn, rev)
if c in filerevmatches:
continue
filerevmatches[c] = 1
else:
before = l.line[:l.colstart]
match = l.line[l.colstart:l.colend]
after = l.line[l.colend:]
for col, label in cols[:-1]:
ui.write(col, label=label)
ui.write(sep, label='grep.sep')
ui.write(cols[-1][0], label=cols[-1][1])
if before is not None:
ui.write(sep, label='grep.sep')
if not opts.get('text') and binary():
ui.write(" Binary file matches")
else:
ui.write(before)
ui.write(match, label='grep.match')
ui.write(after)
ui.write(eol)
found = True
return found
skip = {}
revfiles = {}
matchfn = scmutil.match(repo[None], pats, opts)
found = False
follow = opts.get('follow')
def prep(ctx, fns):
rev = ctx.rev()
pctx = ctx.p1()
parent = pctx.rev()
matches.setdefault(rev, {})
matches.setdefault(parent, {})
files = revfiles.setdefault(rev, [])
for fn in fns:
flog = getfile(fn)
try:
fnode = ctx.filenode(fn)
except error.LookupError:
continue
copied = flog.renamed(fnode)
copy = follow and copied and copied[0]
if copy:
copies.setdefault(rev, {})[fn] = copy
if fn in skip:
if copy:
skip[copy] = True
continue
files.append(fn)
if fn not in matches[rev]:
grepbody(fn, rev, flog.read(fnode))
pfn = copy or fn
if pfn not in matches[parent]:
try:
fnode = pctx.filenode(pfn)
grepbody(pfn, parent, flog.read(fnode))
except error.LookupError:
pass
for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
rev = ctx.rev()
parent = ctx.p1().rev()
for fn in sorted(revfiles.get(rev, [])):
states = matches[rev][fn]
copy = copies.get(rev, {}).get(fn)
if fn in skip:
if copy:
skip[copy] = True
continue
pstates = matches.get(parent, {}).get(copy or fn, [])
if pstates or states:
r = display(fn, ctx, pstates, states)
found = found or r
if r and not opts.get('all'):
skip[fn] = True
if copy:
skip[copy] = True
del matches[rev]
del revfiles[rev]
return not found
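# Self-contained sketch of the difflinestates() idea used inside grep
# above: derive '+'/'-' match-status transitions between two revisions'
# match lists with difflib.SequenceMatcher.
import difflib as _difflib
def linechanges(a, b):
    """Yield ('-', line) for lines leaving a and ('+', line) entering b."""
    sm = _difflib.SequenceMatcher(None, a, b)
    for tag, alo, ahi, blo, bhi in sm.get_opcodes():
        if tag in ('delete', 'replace'):
            for line in a[alo:ahi]:
                yield '-', line
        if tag in ('insert', 'replace'):
            for line in b[blo:bhi]:
                yield '+', line
# list(linechanges(['x'], ['y'])) -> [('-', 'x'), ('+', 'y')]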
@command('heads',
[('r', 'rev', '',
_('show only heads which are descendants of STARTREV'), _('STARTREV')),
('t', 'topo', False, _('show topological heads only')),
('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
('c', 'closed', False, _('show normal and closed branch heads')),
] + templateopts,
_('[-ct] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
"""show current repository heads or show branch heads
With no arguments, show all repository branch heads.
Repository "heads" are changesets with no child changesets. They are
where development generally takes place and are the usual targets
for update and merge operations. Branch heads are changesets that have
no child changeset on the same branch.
If one or more REVs are given, only branch heads on the branches
associated with the specified changesets are shown. This means
that you can use :hg:`heads foo` to see the heads on a branch
named ``foo``.
If -c/--closed is specified, also show branch heads marked closed
(see :hg:`commit --close-branch`).
If STARTREV is specified, only those heads that are descendants of
STARTREV will be displayed.
If -t/--topo is specified, named branch mechanics will be ignored and only
changesets without children will be shown.
Returns 0 if matching heads are found, 1 if not.
"""
start = None
if 'rev' in opts:
start = scmutil.revsingle(repo, opts['rev'], None).node()
if opts.get('topo'):
heads = [repo[h] for h in repo.heads(start)]
else:
heads = []
for branch in repo.branchmap():
heads += repo.branchheads(branch, start, opts.get('closed'))
heads = [repo[h] for h in heads]
if branchrevs:
branches = set(repo[br].branch() for br in branchrevs)
heads = [h for h in heads if h.branch() in branches]
if opts.get('active') and branchrevs:
dagheads = repo.heads(start)
heads = [h for h in heads if h.node() in dagheads]
if branchrevs:
haveheads = set(h.branch() for h in heads)
if branches - haveheads:
headless = ', '.join(b for b in branches - haveheads)
msg = _('no open branch heads found on branches %s')
if opts.get('rev'):
msg += _(' (started at %s)') % opts['rev']
ui.warn((msg + '\n') % headless)
if not heads:
return 1
heads = sorted(heads, key=lambda x: -x.rev())
displayer = cmdutil.show_changeset(ui, repo, opts)
for ctx in heads:
displayer.show(ctx)
displayer.close()
@command('help',
[('e', 'extension', None, _('show only help for extensions')),
('c', 'command', None, _('show only help for commands')),
('k', 'keyword', '', _('show topics matching keyword')),
],
_('[-ec] [TOPIC]'))
def help_(ui, name=None, **opts):
"""show help for a given topic or a help overview
With no arguments, print a list of commands with short help messages.
Given a topic, extension, or command name, print help for that
topic.
Returns 0 if successful.
"""
textwidth = min(ui.termwidth(), 80) - 2
keep = ui.verbose and ['verbose'] or []
text = help.help_(ui, name, **opts)
formatted, pruned = minirst.format(text, textwidth, keep=keep)
if 'verbose' in pruned:
keep.append('omitted')
else:
keep.append('notomitted')
formatted, pruned = minirst.format(text, textwidth, keep=keep)
ui.write(formatted)
@command('identify|id',
[('r', 'rev', '',
_('identify the specified revision'), _('REV')),
('n', 'num', None, _('show local revision number')),
('i', 'id', None, _('show global revision id')),
('b', 'branch', None, _('show branch')),
('t', 'tags', None, _('show tags')),
('B', 'bookmarks', None, _('show bookmarks')),
] + remoteopts,
_('[-nibtB] [-r REV] [SOURCE]'))
def identify(ui, repo, source=None, rev=None,
num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
"""identify the working copy or specified revision
Print a summary identifying the repository state at REV using one or
two parent hash identifiers, followed by a "+" if the working
directory has uncommitted changes, the branch name (if not default),
a list of tags, and a list of bookmarks.
When REV is not given, print a summary of the current state of the
repository.
Specifying a path to a repository root or Mercurial bundle will
cause lookup to operate on that repository/bundle.
.. container:: verbose
Examples:
- generate a build identifier for the working directory::
hg id --id > build-id.dat
- find the revision corresponding to a tag::
hg id -n -r 1.3
- check the most recent revision of a remote repository::
hg id -r tip http://selenic.com/hg/
Returns 0 if successful.
"""
if not repo and not source:
raise util.Abort(_("there is no Mercurial repository here "
"(.hg not found)"))
hexfunc = ui.debugflag and hex or short
default = not (num or id or branch or tags or bookmarks)
output = []
revs = []
if source:
source, branches = hg.parseurl(ui.expandpath(source))
peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
repo = peer.local()
revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
if not repo:
if num or branch or tags:
raise util.Abort(
_("can't query remote revision number, branch, or tags"))
if not rev and revs:
rev = revs[0]
if not rev:
rev = "tip"
remoterev = peer.lookup(rev)
if default or id:
output = [hexfunc(remoterev)]
def getbms():
bms = []
if 'bookmarks' in peer.listkeys('namespaces'):
hexremoterev = hex(remoterev)
bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
if bmr == hexremoterev]
return sorted(bms)
if bookmarks:
output.extend(getbms())
elif default and not ui.quiet:
# multiple bookmarks for a single parent separated by '/'
bm = '/'.join(getbms())
if bm:
output.append(bm)
else:
if not rev:
ctx = repo[None]
parents = ctx.parents()
changed = ""
if default or id or num:
if (util.any(repo.status())
or util.any(ctx.sub(s).dirty() for s in ctx.substate)):
changed = '+'
if default or id:
output = ["%s%s" %
('+'.join([hexfunc(p.node()) for p in parents]), changed)]
if num:
output.append("%s%s" %
('+'.join([str(p.rev()) for p in parents]), changed))
else:
ctx = scmutil.revsingle(repo, rev)
if default or id:
output = [hexfunc(ctx.node())]
if num:
output.append(str(ctx.rev()))
if default and not ui.quiet:
b = ctx.branch()
if b != 'default':
output.append("(%s)" % b)
# multiple tags for a single parent separated by '/'
t = '/'.join(ctx.tags())
if t:
output.append(t)
# multiple bookmarks for a single parent separated by '/'
bm = '/'.join(ctx.bookmarks())
if bm:
output.append(bm)
else:
if branch:
output.append(ctx.branch())
if tags:
output.extend(ctx.tags())
if bookmarks:
output.extend(ctx.bookmarks())
ui.write("%s\n" % ' '.join(output))
@command('import|patch',
[('p', 'strip', 1,
_('directory strip option for patch. This has the same '
'meaning as the corresponding patch option'), _('NUM')),
('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
('e', 'edit', False, _('invoke editor on commit messages')),
('f', 'force', None, _('skip check for outstanding uncommitted changes')),
('', 'no-commit', None,
_("don't commit, just update the working directory")),
('', 'bypass', None,
_("apply patch without touching the working directory")),
('', 'exact', None,
_('apply patch to the nodes from which it was generated')),
('', 'import-branch', None,
_('use any branch information in patch (implied by --exact)'))] +
commitopts + commitopts2 + similarityopts,
_('[OPTION]... PATCH...'))
def import_(ui, repo, patch1=None, *patches, **opts):
"""import an ordered set of patches
Import a list of patches and commit them individually (unless
--no-commit is specified).
If there are outstanding changes in the working directory, import
will abort unless given the -f/--force flag.
You can import a patch straight from a mail message. Even patches
as attachments work (to use the body part, it must have type
text/plain or text/x-patch). The From and Subject headers of the
email message are used as the default committer and commit message.
All text/plain body parts before the first diff are added to the
commit message.
If the imported patch was generated by :hg:`export`, user and
description from patch override values from message headers and
body. Values given on command line with -m/--message and -u/--user
override these.
If --exact is specified, import will set the working directory to
the parent of each patch before applying it, and will abort if the
resulting changeset has a different ID than the one recorded in
the patch. This may happen due to character set problems or other
deficiencies in the text patch format.
Use --bypass to apply and commit patches directly to the
repository, not touching the working directory. Without --exact,
patches will be applied on top of the working directory parent
revision.
With -s/--similarity, hg will attempt to discover renames and
copies in the patch in the same way as :hg:`addremove`.
To read a patch from standard input, use "-" as the patch name. If
a URL is specified, the patch will be downloaded from it.
See :hg:`help dates` for a list of formats valid for -d/--date.
.. container:: verbose
Examples:
- import a traditional patch from a website and detect renames::
hg import -s 80 http://example.com/bugfix.patch
- import a changeset from an hgweb server::
hg import http://www.selenic.com/hg/rev/5ca8c111e9aa
- import all the patches in a Unix-style mbox::
hg import incoming-patches.mbox
- attempt to exactly restore an exported changeset (not always
possible)::
hg import --exact proposed-fix.patch
Returns 0 on success.
"""
if not patch1:
raise util.Abort(_('need at least one patch to import'))
patches = (patch1,) + patches
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
editor = cmdutil.commiteditor
if opts.get('edit'):
editor = cmdutil.commitforceeditor
update = not opts.get('bypass')
if not update and opts.get('no_commit'):
raise util.Abort(_('cannot use --no-commit with --bypass'))
try:
sim = float(opts.get('similarity') or 0)
except ValueError:
raise util.Abort(_('similarity must be a number'))
if sim < 0 or sim > 100:
raise util.Abort(_('similarity must be between 0 and 100'))
if sim and not update:
raise util.Abort(_('cannot use --similarity with --bypass'))
if (opts.get('exact') or not opts.get('force')) and update:
cmdutil.bailifchanged(repo)
base = opts["base"]
strip = opts["strip"]
wlock = lock = tr = None
msgs = []
def tryone(ui, hunk, parents):
tmpname, message, user, date, branch, nodeid, p1, p2 = \
patch.extract(ui, hunk)
if not tmpname:
return (None, None)
msg = _('applied to working directory')
try:
cmdline_message = cmdutil.logmessage(ui, opts)
if cmdline_message:
# pickup the cmdline msg
message = cmdline_message
elif message:
# pickup the patch msg
message = message.strip()
else:
# launch the editor
message = None
ui.debug('message:\n%s\n' % message)
if len(parents) == 1:
parents.append(repo[nullid])
if opts.get('exact'):
if not nodeid or not p1:
raise util.Abort(_('not a Mercurial patch'))
p1 = repo[p1]
p2 = repo[p2 or nullid]
elif p2:
try:
p1 = repo[p1]
p2 = repo[p2]
# Without any options, consider p2 only if the
# patch is being applied on top of the recorded
# first parent.
if p1 != parents[0]:
p1 = parents[0]
p2 = repo[nullid]
except error.RepoError:
p1, p2 = parents
else:
p1, p2 = parents
n = None
if update:
if p1 != parents[0]:
hg.clean(repo, p1.node())
if p2 != parents[1]:
repo.setparents(p1.node(), p2.node())
if opts.get('exact') or opts.get('import_branch'):
repo.dirstate.setbranch(branch or 'default')
files = set()
patch.patch(ui, repo, tmpname, strip=strip, files=files,
eolmode=None, similarity=sim / 100.0)
files = list(files)
if opts.get('no_commit'):
if message:
msgs.append(message)
else:
if opts.get('exact') or p2:
# If you got here, you either used --force and know what
# you are doing, or used --exact, or applied a merge patch
# while updated to its first parent.
m = None
else:
m = scmutil.matchfiles(repo, files or [])
n = repo.commit(message, opts.get('user') or user,
opts.get('date') or date, match=m,
editor=editor)
else:
if opts.get('exact') or opts.get('import_branch'):
branch = branch or 'default'
else:
branch = p1.branch()
store = patch.filestore()
try:
files = set()
try:
patch.patchrepo(ui, repo, p1, store, tmpname, strip,
files, eolmode=None)
except patch.PatchError, e:
raise util.Abort(str(e))
memctx = patch.makememctx(repo, (p1.node(), p2.node()),
message,
opts.get('user') or user,
opts.get('date') or date,
branch, files, store,
editor=cmdutil.commiteditor)
repo.savecommitmessage(memctx.description())
n = memctx.commit()
finally:
store.close()
if opts.get('exact') and hex(n) != nodeid:
raise util.Abort(_('patch is damaged or loses information'))
if n:
# i18n: refers to a short changeset id
msg = _('created %s') % short(n)
return (msg, n)
finally:
os.unlink(tmpname)
try:
try:
wlock = repo.wlock()
if not opts.get('no_commit'):
lock = repo.lock()
tr = repo.transaction('import')
parents = repo.parents()
for patchurl in patches:
if patchurl == '-':
ui.status(_('applying patch from stdin\n'))
patchfile = ui.fin
patchurl = 'stdin' # for error message
else:
patchurl = os.path.join(base, patchurl)
ui.status(_('applying %s\n') % patchurl)
patchfile = hg.openpath(ui, patchurl)
haspatch = False
for hunk in patch.split(patchfile):
(msg, node) = tryone(ui, hunk, parents)
if msg:
haspatch = True
ui.note(msg + '\n')
if update or opts.get('exact'):
parents = repo.parents()
else:
parents = [repo[node]]
if not haspatch:
raise util.Abort(_('%s: no diffs found') % patchurl)
if tr:
tr.close()
if msgs:
repo.savecommitmessage('\n* * *\n'.join(msgs))
except: # re-raises
# wlock.release() indirectly calls dirstate.write(): since
# we're crashing, we do not want to change the working dir
# parent after all, so make sure it writes nothing
repo.dirstate.invalidate()
raise
finally:
if tr:
tr.release()
release(lock, wlock)
@command('incoming|in',
[('f', 'force', None,
_('run even if remote repository is unrelated')),
('n', 'newest-first', None, _('show newest record first')),
('', 'bundle', '',
_('file to store the bundles into'), _('FILE')),
('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
('B', 'bookmarks', False, _("compare bookmarks")),
('b', 'branch', [],
_('a specific branch you would like to pull'), _('BRANCH')),
] + logopts + remoteopts + subrepoopts,
_('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
"""show new changesets found in source
Show new changesets found in the specified path/URL or the default
pull location. These are the changesets that would have been pulled
if a pull had been requested at the time you issued this command.
For remote repositories, using --bundle avoids downloading the
changesets twice if the incoming command is followed by a pull.
See pull for valid source format details.
Returns 0 if there are incoming changes, 1 otherwise.
"""
if opts.get('graph'):
cmdutil.checkunsupportedgraphflags([], opts)
def display(other, chlist, displayer):
revdag = cmdutil.graphrevs(other, chlist, opts)
showparents = [ctx.node() for ctx in repo[None].parents()]
cmdutil.displaygraph(ui, revdag, displayer, showparents,
graphmod.asciiedges)
hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
return 0
if opts.get('bundle') and opts.get('subrepos'):
raise util.Abort(_('cannot combine --bundle and --subrepos'))
if opts.get('bookmarks'):
source, branches = hg.parseurl(ui.expandpath(source),
opts.get('branch'))
other = hg.peer(repo, opts, source)
if 'bookmarks' not in other.listkeys('namespaces'):
ui.warn(_("remote doesn't support bookmarks\n"))
return 0
ui.status(_('comparing with %s\n') % util.hidepassword(source))
return bookmarks.diff(ui, repo, other)
repo._subtoppath = ui.expandpath(source)
try:
return hg.incoming(ui, repo, source, opts)
finally:
del repo._subtoppath
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'))
def init(ui, dest=".", **opts):
"""create a new repository in the given directory
Initialize a new repository in the given directory. If the given
directory does not exist, it will be created.
If no directory is given, the current directory is used.
It is possible to specify an ``ssh://`` URL as the destination.
See :hg:`help urls` for more information.
Returns 0 on success.
"""
hg.peer(ui, opts, ui.expandpath(dest), create=True)
@command('locate',
[('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
('f', 'fullpath', None, _('print complete paths from the filesystem root')),
] + walkopts,
_('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
"""locate files matching specific patterns
Print files under Mercurial control in the working directory whose
names match the given patterns.
By default, this command searches all directories in the working
directory. To search just the current directory and its
subdirectories, use "--include .".
If no patterns are given to match, this command prints the names
of all files under Mercurial control in the working directory.
If you want to feed the output of this command into the "xargs"
command, use the -0 option to both this command and "xargs". This
will avoid the problem of "xargs" treating single filenames that
contain whitespace as multiple filenames.
Returns 0 if a match is found, 1 otherwise.
"""
end = opts.get('print0') and '\0' or '\n'
rev = scmutil.revsingle(repo, opts.get('rev'), None).node()
ret = 1
m = scmutil.match(repo[rev], pats, opts, default='relglob')
m.bad = lambda x, y: False
for abs in repo[rev].walk(m):
if not rev and abs not in repo.dirstate:
continue
if opts.get('fullpath'):
ui.write(repo.wjoin(abs), end)
else:
ui.write(((pats and m.rel(abs)) or abs), end)
ret = 0
return ret
@command('^log|history',
[('f', 'follow', None,
_('follow changeset history, or file history across copies and renames')),
('', 'follow-first', None,
_('only follow the first parent of merge changesets (DEPRECATED)')),
('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
('C', 'copies', None, _('show copied files')),
('k', 'keyword', [],
_('do case-insensitive search for a given text'), _('TEXT')),
('r', 'rev', [], _('show the specified revision or range'), _('REV')),
('', 'removed', None, _('include revisions where files were removed')),
('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
('u', 'user', [], _('revisions committed by user'), _('USER')),
('', 'only-branch', [],
_('show only changesets within the given named branch (DEPRECATED)'),
_('BRANCH')),
('b', 'branch', [],
_('show changesets within the given named branch'), _('BRANCH')),
('P', 'prune', [],
_('do not display revision or any of its ancestors'), _('REV')),
] + logopts + walkopts,
_('[OPTION]... [FILE]'))
def log(ui, repo, *pats, **opts):
"""show revision history of entire repository or files
Print the revision history of the specified files or the entire
project.
If no revision range is specified, the default is ``tip:0`` unless
--follow is set, in which case the working directory parent is
used as the starting revision.
File history is shown without following rename or copy history of
files. Use -f/--follow with a filename to follow history across
renames and copies. --follow without a filename will only show
ancestors or descendants of the starting revision.
By default this command prints revision number and changeset id,
tags, non-trivial parents, user, date and time, and a summary for
each commit. When the -v/--verbose switch is used, the list of
changed files and full commit message are shown.
.. note::
log -p/--patch may generate unexpected diff output for merge
changesets, as it will only compare the merge changeset against
its first parent. Also, only files different from BOTH parents
will appear in files:.
.. note::
for performance reasons, log FILE may omit duplicate changes
made on branches and will not show deletions. To see all
changes including duplicates and deletions, use the --removed
switch.
.. container:: verbose
Some examples:
- changesets with full descriptions and file lists::
hg log -v
- changesets ancestral to the working directory::
hg log -f
- last 10 commits on the current branch::
hg log -l 10 -b .
- changesets showing all modifications of a file, including removals::
hg log --removed file.c
- all changesets that touch a directory, with diffs, excluding merges::
hg log -Mp lib/
- all revision numbers that match a keyword::
hg log -k bug --template "{rev}\\n"
- check if a given changeset is included in a tagged release::
hg log -r "a21ccf and ancestor(1.9)"
- find all changesets by some user in a date range::
hg log -k alice -d "may 2008 to jul 2008"
- summary of all changesets after the last tag::
hg log -r "last(tagged())::" --template "{desc|firstline}\\n"
See :hg:`help dates` for a list of formats valid for -d/--date.
See :hg:`help revisions` and :hg:`help revsets` for more about
specifying revisions.
See :hg:`help templates` for more about pre-packaged styles and
specifying custom templates.
Returns 0 on success.
"""
if opts.get('graph'):
return cmdutil.graphlog(ui, repo, *pats, **opts)
matchfn = scmutil.match(repo[None], pats, opts)
limit = cmdutil.loglimit(opts)
count = 0
getrenamed, endrev = None, None
if opts.get('copies'):
if opts.get('rev'):
endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
df = False
if opts.get("date"):
df = util.matchdate(opts["date"])
branches = opts.get('branch', []) + opts.get('only_branch', [])
opts['branch'] = [repo.lookupbranch(b) for b in branches]
displayer = cmdutil.show_changeset(ui, repo, opts, True)
def prep(ctx, fns):
rev = ctx.rev()
parents = [p for p in repo.changelog.parentrevs(rev)
if p != nullrev]
if opts.get('no_merges') and len(parents) == 2:
return
if opts.get('only_merges') and len(parents) != 2:
return
if opts.get('branch') and ctx.branch() not in opts['branch']:
return
if df and not df(ctx.date()[0]):
return
lower = encoding.lower
if opts.get('user'):
luser = lower(ctx.user())
for k in [lower(x) for x in opts['user']]:
if (k in luser):
break
else:
return
if opts.get('keyword'):
luser = lower(ctx.user())
ldesc = lower(ctx.description())
lfiles = lower(" ".join(ctx.files()))
for k in [lower(x) for x in opts['keyword']]:
if (k in luser or k in ldesc or k in lfiles):
break
else:
return
copies = None
if getrenamed is not None and rev:
copies = []
for fn in ctx.files():
rename = getrenamed(fn, rev)
if rename:
copies.append((fn, rename[0]))
revmatchfn = None
if opts.get('patch') or opts.get('stat'):
if opts.get('follow') or opts.get('follow_first'):
# note: this might be wrong when following through merges
revmatchfn = scmutil.match(repo[None], fns, default='path')
else:
revmatchfn = matchfn
displayer.show(ctx, copies=copies, matchfn=revmatchfn)
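# walkchangerevs() drives the loop below: it calls prep() once for each
# candidate revision so the filters above can veto it, and flush() reports
# whether the changeset was actually displayed (contract inferred from
# this call site).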
for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
if displayer.flush(ctx.rev()):
count += 1
if count == limit:
break
displayer.close()
@command('manifest',
[('r', 'rev', '', _('revision to display'), _('REV')),
('', 'all', False, _("list files from all revisions"))],
_('[-r REV]'))
def manifest(ui, repo, node=None, rev=None, **opts):
"""output the current or given revision of the project manifest
Print a list of version controlled files for the given revision.
If no revision is given, the first parent of the working directory
is used, or the null revision if no revision is checked out.
With -v, print file permissions, symlink and executable bits.
With --debug, print file revision hashes.
If option --all is specified, the list of all files from all revisions
is printed. This includes deleted and renamed files.
Returns 0 on success.
"""
fm = ui.formatter('manifest', opts)
if opts.get('all'):
if rev or node:
raise util.Abort(_("can't specify a revision with --all"))
res = []
prefix = "data/"
suffix = ".i"
plen = len(prefix)
slen = len(suffix)
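# Store filenames look like "data/<path>.i"; strip the "data/" prefix and
# the ".i" suffix to recover the tracked path.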
lock = repo.lock()
try:
for fn, b, size in repo.store.datafiles():
if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
res.append(fn[plen:-slen])
finally:
lock.release()
for f in res:
fm.startitem()
fm.write("path", '%s\n', f)
fm.end()
return
if rev and node:
raise util.Abort(_("please specify just one revision"))
if not node:
node = rev
char = {'l': '@', 'x': '*', '': ''}
mode = {'l': '644', 'x': '755', '': '644'}
ctx = scmutil.revsingle(repo, node)
mf = ctx.manifest()
for f in ctx:
fm.startitem()
fl = ctx[f].flags()
fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
fm.write('path', '%s\n', f)
fm.end()
@command('^merge',
[('f', 'force', None, _('force a merge with outstanding changes')),
('r', 'rev', '', _('revision to merge'), _('REV')),
('P', 'preview', None,
_('review revisions to merge (no merge is performed)'))
] + mergetoolopts,
_('[-P] [-f] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
"""merge working directory with another revision
The current working directory is updated with all changes made in
the requested revision since the last common predecessor revision.
Files that changed between either parent are marked as changed for
the next commit and a commit must be performed before any further
updates to the repository are allowed. The next commit will have
two parents.
``--tool`` can be used to specify the merge tool used for file
merges. It overrides the HGMERGE environment variable and your
configuration files. See :hg:`help merge-tools` for options.
If no revision is specified, the working directory's parent is a
head revision, and the current branch contains exactly one other
head, the other head is merged by default. Otherwise, an
explicit revision with which to merge must be provided.
:hg:`resolve` must be used to resolve unresolved files.
To undo an uncommitted merge, use :hg:`update --clean .` which
will check out a clean copy of the original merge parent, losing
all changes.
Returns 0 on success, 1 if there are unresolved files.
"""
if opts.get('rev') and node:
raise util.Abort(_("please specify just one revision"))
if not node:
node = opts.get('rev')
if node:
node = scmutil.revsingle(repo, node).node()
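# With no explicit revision, try to pick the merge target from the heads
# of the active bookmark: exactly two bookmark heads makes the "other"
# head unambiguous; anything else requires an explicit rev or bookmark.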
if not node and repo._bookmarkcurrent:
bmheads = repo.bookmarkheads(repo._bookmarkcurrent)
curhead = repo[repo._bookmarkcurrent].node()
if len(bmheads) == 2:
if curhead == bmheads[0]:
node = bmheads[1]
else:
node = bmheads[0]
elif len(bmheads) > 2:
raise util.Abort(_("multiple matching bookmarks to merge - "
"please merge with an explicit rev or bookmark"),
hint=_("run 'hg heads' to see all heads"))
elif len(bmheads) <= 1:
raise util.Abort(_("no matching bookmark to merge - "
"please merge with an explicit rev or bookmark"),
hint=_("run 'hg heads' to see all heads"))
if not node and not repo._bookmarkcurrent:
branch = repo[None].branch()
bheads = repo.branchheads(branch)
nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]
if len(nbhs) > 2:
raise util.Abort(_("branch '%s' has %d heads - "
"please merge with an explicit rev")
% (branch, len(bheads)),
hint=_("run 'hg heads .' to see heads"))
parent = repo.dirstate.p1()
if len(nbhs) <= 1:
if len(bheads) > 1:
raise util.Abort(_("heads are bookmarked - "
"please merge with an explicit rev"),
hint=_("run 'hg heads' to see all heads"))
if len(repo.heads()) > 1:
raise util.Abort(_("branch '%s' has one head - "
"please merge with an explicit rev")
% branch,
hint=_("run 'hg heads' to see all heads"))
msg, hint = _('nothing to merge'), None
if parent != repo.lookup(branch):
hint = _("use 'hg update' instead")
raise util.Abort(msg, hint=hint)
if parent not in bheads:
raise util.Abort(_('working directory not at a head revision'),
hint=_("use 'hg update' or merge with an "
"explicit revision"))
if parent == nbhs[0]:
node = nbhs[-1]
else:
node = nbhs[0]
if opts.get('preview'):
# find nodes that are ancestors of p2 but not of p1
p1 = repo.lookup('.')
p2 = repo.lookup(node)
nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
displayer = cmdutil.show_changeset(ui, repo, opts)
for node in nodes:
displayer.show(repo[node])
displayer.close()
return 0
try:
# ui.forcemerge is an internal variable, do not document
repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
return hg.merge(repo, node, force=opts.get('force'))
finally:
ui.setconfig('ui', 'forcemerge', '')
@command('outgoing|out',
[('f', 'force', None, _('run even when the destination is unrelated')),
('r', 'rev', [],
_('a changeset intended to be included in the destination'), _('REV')),
('n', 'newest-first', None, _('show newest record first')),
('B', 'bookmarks', False, _('compare bookmarks')),
('b', 'branch', [], _('a specific branch you would like to push'),
_('BRANCH')),
] + logopts + remoteopts + subrepoopts,
_('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
"""show changesets not found in the destination
Show changesets not found in the specified destination repository
or the default push location. These are the changesets that would
be pushed if a push was requested.
See pull for details of valid destination formats.
Returns 0 if there are outgoing changes, 1 otherwise.
"""
if opts.get('graph'):
cmdutil.checkunsupportedgraphflags([], opts)
o = hg._outgoing(ui, repo, dest, opts)
if o is None:
return
revdag = cmdutil.graphrevs(repo, o, opts)
displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
showparents = [ctx.node() for ctx in repo[None].parents()]
cmdutil.displaygraph(ui, revdag, displayer, showparents,
graphmod.asciiedges)
return 0
if opts.get('bookmarks'):
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest, opts.get('branch'))
other = hg.peer(repo, opts, dest)
if 'bookmarks' not in other.listkeys('namespaces'):
ui.warn(_("remote doesn't support bookmarks\n"))
return 0
ui.status(_('comparing with %s\n') % util.hidepassword(dest))
return bookmarks.diff(ui, other, repo)
repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
try:
return hg.outgoing(ui, repo, dest, opts)
finally:
del repo._subtoppath
@command('parents',
[('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
] + templateopts,
_('[-r REV] [FILE]'))
def parents(ui, repo, file_=None, **opts):
"""show the parents of the working directory or revision
Print the working directory's parent revisions. If a revision is
given via -r/--rev, the parent of that revision will be printed.
If a file argument is given, the revision in which the file was
last changed (before the working directory revision or the
argument to --rev if given) is printed.
Returns 0 on success.
"""
ctx = scmutil.revsingle(repo, opts.get('rev'), None)
if file_:
m = scmutil.match(ctx, (file_,), opts)
if m.anypats() or len(m.files()) != 1:
raise util.Abort(_('can only specify an explicit filename'))
file_ = m.files()[0]
filenodes = []
for cp in ctx.parents():
if not cp:
continue
try:
filenodes.append(cp.filenode(file_))
except error.LookupError:
pass
if not filenodes:
raise util.Abort(_("'%s' not found in manifest!") % file_)
fl = repo.file(file_)
p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
else:
p = [cp.node() for cp in ctx.parents()]
displayer = cmdutil.show_changeset(ui, repo, opts)
for n in p:
if n != nullid:
displayer.show(repo[n])
displayer.close()
@command('paths', [], _('[NAME]'))
def paths(ui, repo, search=None):
"""show aliases for remote repositories
Show definition of symbolic path name NAME. If no name is given,
show definition of all available names.
Option -q/--quiet suppresses all output when searching for NAME
and shows only the path names when listing all definitions.
Path names are defined in the [paths] section of your
configuration file and in ``/etc/mercurial/hgrc``. If run inside a
repository, ``.hg/hgrc`` is used, too.
The path names ``default`` and ``default-push`` have a special
meaning. When performing a push or pull operation, they are used
as fallbacks if no location is specified on the command-line.
When ``default-push`` is set, it will be used for push and
``default`` will be used for pull; otherwise ``default`` is used
as the fallback for both. When cloning a repository, the clone
source is written as ``default`` in ``.hg/hgrc``. Note that
``default`` and ``default-push`` apply to all inbound (e.g.
:hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
:hg:`bundle`) operations.
See :hg:`help urls` for more information.
Returns 0 on success.
"""
if search:
for name, path in ui.configitems("paths"):
if name == search:
ui.status("%s\n" % util.hidepassword(path))
return
if not ui.quiet:
ui.warn(_("not found!\n"))
return 1
else:
for name, path in ui.configitems("paths"):
if ui.quiet:
ui.write("%s\n" % name)
else:
ui.write("%s = %s\n" % (name, util.hidepassword(path)))
@command('phase',
[('p', 'public', False, _('set changeset phase to public')),
('d', 'draft', False, _('set changeset phase to draft')),
('s', 'secret', False, _('set changeset phase to secret')),
('f', 'force', False, _('allow to move boundary backward')),
('r', 'rev', [], _('target revision'), _('REV')),
],
_('[-p|-d|-s] [-f] [-r] REV...'))
def phase(ui, repo, *revs, **opts):
"""set or show the current phase name
With no argument, show the phase name of specified revisions.
With one of -p/--public, -d/--draft or -s/--secret, change the
phase value of the specified revisions.
Unless -f/--force is specified, :hg:`phase` won't move changesets from a
lower phase to a higher phase. Phases are ordered as follows::
public < draft < secret
Returns 0 on success, 1 if no phases were changed or some could not
be changed.
"""
# search for a unique phase argument
targetphase = None
for idx, name in enumerate(phases.phasenames):
if opts[name]:
if targetphase is not None:
raise util.Abort(_('only one phase can be specified'))
targetphase = idx
# look for specified revision
revs = list(revs)
revs.extend(opts['rev'])
if not revs:
raise util.Abort(_('no revisions specified'))
revs = scmutil.revrange(repo, revs)
lock = None
ret = 0
if targetphase is None:
# display
for r in revs:
ctx = repo[r]
ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
else:
lock = repo.lock()
try:
# set phase
if not revs:
raise util.Abort(_('empty revision set'))
nodes = [repo[r].node() for r in revs]
olddata = repo._phasecache.getphaserevs(repo)[:]
phases.advanceboundary(repo, targetphase, nodes)
if opts['force']:
phases.retractboundary(repo, targetphase, nodes)
finally:
lock.release()
# moving revisions from public to draft may hide them
# We have to check result on an unfiltered repository
unfi = repo.unfiltered()
newdata = repo._phasecache.getphaserevs(unfi)
changes = sum(o != newdata[i] for i, o in enumerate(olddata))
cl = unfi.changelog
rejected = [n for n in nodes
if newdata[cl.rev(n)] < targetphase]
if rejected:
ui.warn(_('cannot move %i changesets to a more permissive '
'phase, use --force\n') % len(rejected))
ret = 1
if changes:
msg = _('phase changed for %i changesets\n') % changes
if ret:
ui.status(msg)
else:
ui.note(msg)
else:
ui.warn(_('no phases changed\n'))
ret = 1
return ret
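# Shared helper for pull and unbundle: once new changesets have arrived,
# optionally update the working copy and print a hint about how many
# heads changed and what to do next.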
def postincoming(ui, repo, modheads, optupdate, checkout):
if modheads == 0:
return
if optupdate:
movemarkfrom = repo['.'].node()
try:
ret = hg.update(repo, checkout)
except util.Abort, inst:
ui.warn(_("not updating: %s\n") % str(inst))
return 0
if not ret and not checkout:
if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
return ret
if modheads > 1:
currentbranchheads = len(repo.branchheads())
if currentbranchheads == modheads:
ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
elif currentbranchheads > 1:
ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to "
"merge)\n"))
else:
ui.status(_("(run 'hg heads' to see heads)\n"))
else:
ui.status(_("(run 'hg update' to get a working copy)\n"))
@command('^pull',
[('u', 'update', None,
_('update to new branch head if changesets were pulled')),
('f', 'force', None, _('run even when remote repository is unrelated')),
('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
('b', 'branch', [], _('a specific branch you would like to pull'),
_('BRANCH')),
] + remoteopts,
_('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def pull(ui, repo, source="default", **opts):
"""pull changes from the specified source
Pull changes from a remote repository to a local one.
This finds all changes from the repository at the specified path
or URL and adds them to a local repository (the current one unless
-R is specified). By default, this does not update the copy of the
project in the working directory.
Use :hg:`incoming` if you want to see what would have been added
by a pull at the time you issued this command. If you then decide
to add those changes to the repository, you should use :hg:`pull
-r X` where ``X`` is the last changeset listed by :hg:`incoming`.
If SOURCE is omitted, the 'default' path will be used.
See :hg:`help urls` for more information.
Returns 0 on success, 1 if an update had unresolved files.
"""
source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
other = hg.peer(repo, opts, source)
ui.status(_('pulling from %s\n') % util.hidepassword(source))
revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
remotebookmarks = other.listkeys('bookmarks')
if opts.get('bookmark'):
if not revs:
revs = []
for b in opts['bookmark']:
if b not in remotebookmarks:
raise util.Abort(_('remote bookmark %s not found!') % b)
revs.append(remotebookmarks[b])
if revs:
try:
revs = [other.lookup(rev) for rev in revs]
except error.CapabilityError:
err = _("other repository doesn't support revision lookup, "
"so a rev cannot be specified.")
raise util.Abort(err)
modheads = repo.pull(other, heads=revs, force=opts.get('force'))
bookmarks.updatefromremote(ui, repo, remotebookmarks, source)
if checkout:
checkout = str(repo.changelog.rev(other.lookup(checkout)))
repo._subtoppath = source
try:
ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)
finally:
del repo._subtoppath
# update specified bookmarks
if opts.get('bookmark'):
marks = repo._bookmarks
for b in opts['bookmark']:
# explicit pull overrides local bookmark if any
ui.status(_("importing bookmark %s\n") % b)
marks[b] = repo[remotebookmarks[b]].node()
marks.write()
return ret
@command('^push',
[('f', 'force', None, _('force push')),
('r', 'rev', [],
_('a changeset intended to be included in the destination'),
_('REV')),
('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
('b', 'branch', [],
_('a specific branch you would like to push'), _('BRANCH')),
('', 'new-branch', False, _('allow pushing a new branch')),
] + remoteopts,
_('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
"""push changes to the specified destination
Push changesets from the local repository to the specified
destination.
This operation is symmetrical to pull: it is identical to a pull
in the destination repository from the current one.
By default, push will not allow creation of new heads at the
destination, since multiple heads would make it unclear which head
to use. In this situation, it is recommended to pull and merge
before pushing.
Use --new-branch if you want to allow push to create a new named
branch that is not present at the destination. This allows you to
only create a new branch without forcing other changes.
Use -f/--force to override the default behavior and push all
changesets on all branches.
If -r/--rev is used, the specified revision and all its ancestors
will be pushed to the remote repository.
If -B/--bookmark is used, the specified bookmarked revision, its
ancestors, and the bookmark will be pushed to the remote
repository.
Please see :hg:`help urls` for important details about ``ssh://``
URLs. If DESTINATION is omitted, a default path will be used.
Returns 0 if push was successful, 1 if nothing to push.
"""
if opts.get('bookmark'):
for b in opts['bookmark']:
# translate -B options to -r so changesets get pushed
if b in repo._bookmarks:
opts.setdefault('rev', []).append(b)
else:
# if we try to push a deleted bookmark, translate it to null
# this lets simultaneous -r, -b options continue working
opts.setdefault('rev', []).append("null")
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest, opts.get('branch'))
ui.status(_('pushing to %s\n') % util.hidepassword(dest))
revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
other = hg.peer(repo, opts, dest)
if revs:
revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]
repo._subtoppath = dest
try:
# push subrepos depth-first for coherent ordering
c = repo['']
subs = c.substate # only repos that are committed
for s in sorted(subs):
if c.sub(s).push(opts) == 0:
return False
finally:
del repo._subtoppath
result = repo.push(other, opts.get('force'), revs=revs,
newbranch=opts.get('new_branch'))
result = not result
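# repo.push() returns a truthy value when changesets were pushed; invert
# it so the command exits 0 on success and 1 when there was nothing to
# push (inferred from the Returns clause in the docstring above).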
if opts.get('bookmark'):
rb = other.listkeys('bookmarks')
for b in opts['bookmark']:
# explicit push overrides remote bookmark if any
if b in repo._bookmarks:
ui.status(_("exporting bookmark %s\n") % b)
new = repo[b].hex()
elif b in rb:
ui.status(_("deleting remote bookmark %s\n") % b)
new = '' # delete
else:
ui.warn(_('bookmark %s does not exist on the local '
'or remote repository!\n') % b)
return 2
old = rb.get(b, '')
r = other.pushkey('bookmarks', b, old, new)
if not r:
ui.warn(_('updating bookmark %s failed!\n') % b)
if not result:
result = 2
return result
@command('recover', [])
def recover(ui, repo):
"""roll back an interrupted transaction
Recover from an interrupted commit or pull.
This command tries to fix the repository status after an
interrupted operation. It should only be necessary when Mercurial
suggests it.
Returns 0 if successful, 1 if nothing to recover or verify fails.
"""
if repo.recover():
return hg.verify(repo)
return 1
@command('^remove|rm',
[('A', 'after', None, _('record delete for missing files')),
('f', 'force', None,
_('remove (and delete) file even if added or modified')),
] + walkopts,
_('[OPTION]... FILE...'))
def remove(ui, repo, *pats, **opts):
"""remove the specified files on the next commit
Schedule the indicated files for removal from the current branch.
This command schedules the files to be removed at the next commit.
To undo a remove before that, see :hg:`revert`. To undo added
files, see :hg:`forget`.
.. container:: verbose
-A/--after can be used to remove only files that have already
been deleted, -f/--force can be used to force deletion, and -Af
can be used to remove files from the next revision without
deleting them from the working directory.
The following table details the behavior of remove for different
file states (columns) and option combinations (rows). The file
states are Added [A], Clean [C], Modified [M] and Missing [!]
(as reported by :hg:`status`). The actions are Warn, Remove
(from branch) and Delete (from disk):
======= == == == ==
A C M !
======= == == == ==
none W RD W R
-f R RD RD R
-A W W W R
-Af R R R R
======= == == == ==
Note that remove never deletes files in Added [A] state from the
working directory, not even if option --force is specified.
Returns 0 on success, 1 if any warnings encountered.
"""
ret = 0
after, force = opts.get('after'), opts.get('force')
if not pats and not after:
raise util.Abort(_('no files specified'))
m = scmutil.match(repo[None], pats, opts)
s = repo.status(match=m, clean=True)
modified, added, deleted, clean = s[0], s[1], s[3], s[6]
# warn about failure to delete explicit files/dirs
wctx = repo[None]
for f in m.files():
if f in repo.dirstate or f in wctx.dirs():
continue
if os.path.exists(m.rel(f)):
if os.path.isdir(m.rel(f)):
ui.warn(_('not removing %s: no tracked files\n') % m.rel(f))
else:
ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
# missing files will generate a warning elsewhere
ret = 1
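# Select which files actually get removed, matching the A/C/M/! table in
# the docstring: -f takes everything, -A only already-missing files, and
# the default skips modified and added files with a warning.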
if force:
list = modified + deleted + clean + added
elif after:
list = deleted
for f in modified + added + clean:
ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
ret = 1
else:
list = deleted + clean
for f in modified:
ui.warn(_('not removing %s: file is modified (use -f'
' to force removal)\n') % m.rel(f))
ret = 1
for f in added:
ui.warn(_('not removing %s: file has been marked for add'
' (use forget to undo)\n') % m.rel(f))
ret = 1
for f in sorted(list):
if ui.verbose or not m.exact(f):
ui.status(_('removing %s\n') % m.rel(f))
wlock = repo.wlock()
try:
if not after:
for f in list:
if f in added:
continue # we never unlink added files on remove
util.unlinkpath(repo.wjoin(f), ignoremissing=True)
repo[None].forget(list)
finally:
wlock.release()
return ret
@command('rename|move|mv',
[('A', 'after', None, _('record a rename that has already occurred')),
('f', 'force', None, _('forcibly copy over an existing managed file')),
] + walkopts + dryrunopts,
_('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
"""rename files; equivalent of copy + remove
Mark dest as copies of sources; mark sources for deletion. If dest
is a directory, copies are put in that directory. If dest is a
file, there can only be one source.
By default, this command copies the contents of files as they
exist in the working directory. If invoked with -A/--after, the
operation is recorded, but no copying is performed.
This command takes effect at the next commit. To undo a rename
before that, see :hg:`revert`.
Returns 0 on success, 1 if errors are encountered.
"""
wlock = repo.wlock(False)
try:
return cmdutil.copy(ui, repo, pats, opts, rename=True)
finally:
wlock.release()
@command('resolve',
[('a', 'all', None, _('select all unresolved files')),
('l', 'list', None, _('list state of files needing merge')),
('m', 'mark', None, _('mark files as resolved')),
('u', 'unmark', None, _('mark files as unresolved')),
('n', 'no-status', None, _('hide status prefix'))]
+ mergetoolopts + walkopts,
_('[OPTION]... [FILE]...'))
def resolve(ui, repo, *pats, **opts):
"""redo merges or set/view the merge status of files
Merges with unresolved conflicts are often the result of
non-interactive merging using the ``internal:merge`` configuration
setting, or a command-line merge tool like ``diff3``. The resolve
command is used to manage the files involved in a merge, after
:hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
working directory must have two parents). See :hg:`help
merge-tools` for information on configuring merge tools.
The resolve command can be used in the following ways:
- :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
files, discarding any previous merge attempts. Re-merging is not
performed for files already marked as resolved. Use ``--all/-a``
to select all unresolved files. ``--tool`` can be used to specify
the merge tool used for the given files. It overrides the HGMERGE
environment variable and your configuration files. Previous file
contents are saved with a ``.orig`` suffix.
- :hg:`resolve -m [FILE]`: mark a file as having been resolved
(e.g. after having manually fixed-up the files). The default is
to mark all unresolved files.
- :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
default is to mark all resolved files.
- :hg:`resolve -l`: list files which had or still have conflicts.
In the printed list, ``U`` = unresolved and ``R`` = resolved.
Note that Mercurial will not let you commit files with unresolved
merge conflicts. You must use :hg:`resolve -m ...` before you can
commit after a conflicting merge.
Returns 0 on success, 1 if any files fail a resolve attempt.
"""
all, mark, unmark, show, nostatus = \
[opts.get(o) for o in 'all mark unmark list no_status'.split()]
if (show and (mark or unmark)) or (mark and unmark):
raise util.Abort(_("too many options specified"))
if pats and all:
raise util.Abort(_("can't specify --all and patterns"))
if not (all or pats or show or mark or unmark):
raise util.Abort(_('no files or directories specified; '
'use --all to remerge all files'))
ms = mergemod.mergestate(repo)
m = scmutil.match(repo[None], pats, opts)
ret = 0
for f in ms:
if m(f):
if show:
if nostatus:
ui.write("%s\n" % f)
else:
ui.write("%s %s\n" % (ms[f].upper(), f),
label='resolve.' +
{'u': 'unresolved', 'r': 'resolved'}[ms[f]])
elif mark:
ms.mark(f, "r")
elif unmark:
ms.mark(f, "u")
else:
wctx = repo[None]
mctx = wctx.parents()[-1]
# backup pre-resolve (merge uses .orig for its own purposes)
a = repo.wjoin(f)
util.copyfile(a, a + ".resolve")
try:
# resolve file
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
if ms.resolve(f, wctx, mctx):
ret = 1
finally:
ui.setconfig('ui', 'forcemerge', '')
ms.commit()
# replace filemerge's .orig file with our resolve file
util.rename(a + ".resolve", a + ".orig")
ms.commit()
return ret
@command('revert',
[('a', 'all', None, _('revert all changes when no arguments given')),
('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
('r', 'rev', '', _('revert to the specified revision'), _('REV')),
('C', 'no-backup', None, _('do not save backup copies of files')),
] + walkopts + dryrunopts,
_('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
"""restore files to their checkout state
.. note::
To check out earlier revisions, you should use :hg:`update REV`.
To cancel an uncommitted merge (and lose your changes),
use :hg:`update --clean .`.
With no revision specified, revert the specified files or directories
to the contents they had in the parent of the working directory.
This restores the contents of files to an unmodified
state and unschedules adds, removes, copies, and renames. If the
working directory has two parents, you must explicitly specify a
revision.
Using the -r/--rev or -d/--date options, revert the given files or
directories to their states as of a specific revision. Because
revert does not change the working directory parents, this will
cause these files to appear modified. This can be helpful to "back
out" some or all of an earlier change. See :hg:`backout` for a
related method.
Modified files are saved with a .orig suffix before reverting.
To disable these backups, use --no-backup.
See :hg:`help dates` for a list of formats valid for -d/--date.
Returns 0 on success.
"""
if opts.get("date"):
if opts.get("rev"):
raise util.Abort(_("you can't specify a revision and a date"))
opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
parent, p2 = repo.dirstate.parents()
if not opts.get('rev') and p2 != nullid:
# revert after merge is a trap for new users (issue2915)
raise util.Abort(_('uncommitted merge with no revision specified'),
hint=_('use "hg update" or see "hg help revert"'))
ctx = scmutil.revsingle(repo, opts.get('rev'))
if not pats and not opts.get('all'):
msg = _("no files or directories specified")
if p2 != nullid:
hint = _("uncommitted merge, use --all to discard all changes,"
" or 'hg update -C .' to abort the merge")
raise util.Abort(msg, hint=hint)
dirty = util.any(repo.status())
node = ctx.node()
if node != parent:
if dirty:
hint = _("uncommitted changes, use --all to discard all"
" changes, or 'hg update %s' to update") % ctx.rev()
else:
hint = _("use --all to revert all files,"
" or 'hg update %s' to update") % ctx.rev()
elif dirty:
hint = _("uncommitted changes, use --all to discard all changes")
else:
hint = _("use --all to revert all files")
raise util.Abort(msg, hint=hint)
return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts)
@command('rollback', dryrunopts +
[('f', 'force', False, _('ignore safety measures'))])
def rollback(ui, repo, **opts):
"""roll back the last transaction (dangerous)
This command should be used with care. There is only one level of
rollback, and there is no way to undo a rollback. It will also
restore the dirstate at the time of the last transaction, losing
any dirstate changes since that time. This command does not alter
the working directory.
Transactions are used to encapsulate the effects of all commands
that create new changesets or propagate existing changesets into a
repository.
.. container:: verbose
For example, the following commands are transactional, and their
effects can be rolled back:
- commit
- import
- pull
- push (with this repository as the destination)
- unbundle
To avoid permanent data loss, rollback will refuse to roll back a
commit transaction if it isn't checked out. Use --force to
override this protection.
This command is not intended for use on public repositories. Once
changes are visible for pull by other users, rolling a transaction
back locally is ineffective (someone else may already have pulled
the changes). Furthermore, a race is possible with readers of the
repository; for example an in-progress pull from the repository
may fail if a rollback is performed.
Returns 0 on success, 1 if no rollback data is available.
"""
return repo.rollback(dryrun=opts.get('dry_run'),
force=opts.get('force'))
@command('root', [])
def root(ui, repo):
"""print the root (top) of the current working directory
Print the root directory of the current repository.
Returns 0 on success.
"""
ui.write(repo.root + "\n")
@command('^serve',
[('A', 'accesslog', '', _('name of access log file to write to'),
_('FILE')),
('d', 'daemon', None, _('run server in background')),
('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
# use string type, then we can check if something was passed
('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
('a', 'address', '', _('address to listen on (default: all interfaces)'),
_('ADDR')),
('', 'prefix', '', _('prefix path to serve from (default: server root)'),
_('PREFIX')),
('n', 'name', '',
_('name to show in web pages (default: working directory)'), _('NAME')),
('', 'web-conf', '',
_('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
_('FILE')),
('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
('', 'stdio', None, _('for remote clients')),
('', 'cmdserver', '', _('for remote clients'), _('MODE')),
('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
('', 'style', '', _('template style to use'), _('STYLE')),
('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
_('[OPTION]...'))
def serve(ui, repo, **opts):
"""start stand-alone webserver
Start a local HTTP repository browser and pull server. You can use
this for ad-hoc sharing and browsing of repositories. It is
recommended to use a real web server to serve a repository for
longer periods of time.
Please note that the server does not implement access control.
This means that, by default, anybody can read from the server and
nobody can write to it. Set the ``web.allow_push``
option to ``*`` to allow everybody to push to the server. You
should use a real web server if you need to authenticate users.
By default, the server logs accesses to stdout and errors to
stderr. Use the -A/--accesslog and -E/--errorlog options to log to
files.
To have the server choose a free port number to listen on, specify
a port number of 0; in this case, the server will print the port
number it uses.
Returns 0 on success.
"""
if opts["stdio"] and opts["cmdserver"]:
raise util.Abort(_("cannot use --stdio with --cmdserver"))
def checkrepo():
if repo is None:
raise error.RepoError(_("there is no Mercurial repository here"
" (.hg not found)"))
if opts["stdio"]:
checkrepo()
s = sshserver.sshserver(ui, repo)
s.serve_forever()
if opts["cmdserver"]:
checkrepo()
s = commandserver.server(ui, repo, opts["cmdserver"])
return s.serve()
# this way we can check if something was given in the command-line
if opts.get('port'):
opts['port'] = util.getport(opts.get('port'))
baseui = repo and repo.baseui or ui
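# Mirror any web-related command-line options into the [web] config
# section so the hgweb application picks them up.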
optlist = ("name templates style address port prefix ipv6"
" accesslog errorlog certificate encoding")
for o in optlist.split():
val = opts.get(o, '')
if val in (None, ''): # should check against default options instead
continue
baseui.setconfig("web", o, val)
if repo and repo.ui != baseui:
repo.ui.setconfig("web", o, val)
o = opts.get('web_conf') or opts.get('webdir_conf')
if not o:
if not repo:
raise error.RepoError(_("there is no Mercurial repository"
" here (.hg not found)"))
o = repo
app = hgweb.hgweb(o, baseui=baseui)
class service(object):
def init(self):
util.setsignalhandler()
self.httpd = hgweb.server.create_server(ui, app)
if opts['port'] and not ui.verbose:
return
if self.httpd.prefix:
prefix = self.httpd.prefix.strip('/') + '/'
else:
prefix = ''
port = ':%d' % self.httpd.port
if port == ':80':
port = ''
bindaddr = self.httpd.addr
if bindaddr == '0.0.0.0':
bindaddr = '*'
elif ':' in bindaddr: # IPv6
bindaddr = '[%s]' % bindaddr
fqaddr = self.httpd.fqaddr
if ':' in fqaddr:
fqaddr = '[%s]' % fqaddr
if opts['port']:
write = ui.status
else:
write = ui.write
write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
(fqaddr, port, prefix, bindaddr, self.httpd.port))
def run(self):
self.httpd.serve_forever()
service = service()
cmdutil.service(opts, initfn=service.init, runfn=service.run)
@command('showconfig|debugconfig',
[('u', 'untrusted', None, _('show untrusted configuration options'))],
_('[-u] [NAME]...'))
def showconfig(ui, repo, *values, **opts):
"""show combined config settings from all hgrc files
With no arguments, print names and values of all config items.
With one argument of the form section.name, print just the value
of that config item.
With multiple arguments, print names and values of all config
items with matching section names.
With --debug, the source (filename and line number) is printed
for each config item.
Returns 0 on success.
"""
for f in scmutil.rcpath():
ui.debug('read config from: %s\n' % f)
untrusted = bool(opts.get('untrusted'))
if values:
sections = [v for v in values if '.' not in v]
items = [v for v in values if '.' in v]
if len(items) > 1 or items and sections:
raise util.Abort(_('only one config item permitted'))
for section, name, value in ui.walkconfig(untrusted=untrusted):
value = str(value).replace('\n', '\\n')
sectname = section + '.' + name
if values:
for v in values:
if v == section:
ui.debug('%s: ' %
ui.configsource(section, name, untrusted))
ui.write('%s=%s\n' % (sectname, value))
elif v == sectname:
ui.debug('%s: ' %
ui.configsource(section, name, untrusted))
ui.write(value, '\n')
else:
ui.debug('%s: ' %
ui.configsource(section, name, untrusted))
ui.write('%s=%s\n' % (sectname, value))
@command('^status|st',
[('A', 'all', None, _('show status of all files')),
('m', 'modified', None, _('show only modified files')),
('a', 'added', None, _('show only added files')),
('r', 'removed', None, _('show only removed files')),
('d', 'deleted', None, _('show only deleted (but tracked) files')),
('c', 'clean', None, _('show only files without changes')),
('u', 'unknown', None, _('show only unknown (not tracked) files')),
('i', 'ignored', None, _('show only ignored files')),
('n', 'no-status', None, _('hide status prefix')),
('C', 'copies', None, _('show source of copied files')),
('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
('', 'rev', [], _('show difference from revision'), _('REV')),
('', 'change', '', _('list the changed files of a revision'), _('REV')),
] + walkopts + subrepoopts,
_('[OPTION]... [FILE]...'))
def status(ui, repo, *pats, **opts):
"""show changed files in the working directory
Show status of files in the repository. If names are given, only
files that match are shown. Files that are clean or ignored or
the source of a copy/move operation are not listed unless
-c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
Unless options described with "show only ..." are given, the
options -mardu are used.
Option -q/--quiet hides untracked (unknown and ignored) files
unless explicitly requested with -u/--unknown or -i/--ignored.
.. note::
status may appear to disagree with diff if permissions have
changed or a merge has occurred. The standard diff format does
not report permission changes and diff only reports changes
relative to one merge parent.
If one revision is given, it is used as the base revision.
If two revisions are given, the differences between them are
shown. The --change option can also be used as a shortcut to list
the changed files of a revision from its first parent.
The codes used to show the status of files are::
M = modified
A = added
R = removed
C = clean
! = missing (deleted by non-hg command, but still tracked)
? = not tracked
I = ignored
= origin of the previous file listed as A (added)
.. container:: verbose
Examples:
- show changes in the working directory relative to a
changeset::
hg status --rev 9353
- show all changes including copies in an existing changeset::
hg status --copies --change 9353
- get a NUL separated list of added files, suitable for xargs::
hg status -an0
Returns 0 on success.
"""
revs = opts.get('rev')
change = opts.get('change')
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1 = repo[node2].p1().node()
else:
node1, node2 = scmutil.revpair(repo, revs)
cwd = (pats and repo.getcwd()) or ''
end = opts.get('print0') and '\0' or '\n'
copy = {}
states = 'modified added removed deleted unknown ignored clean'.split()
show = [k for k in states if opts.get(k)]
if opts.get('all'):
show += ui.quiet and (states[:4] + ['clean']) or states
if not show:
show = ui.quiet and states[:4] or states[:5]
stat = repo.status(node1, node2, scmutil.match(repo[node2], pats, opts),
'ignored' in show, 'clean' in show, 'unknown' in show,
opts.get('subrepos'))
changestates = zip(states, 'MAR!?IC', stat)
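# Pair each state name with its one-letter status code (M, A, R, !, ?, I,
# C) and the corresponding file list returned by repo.status().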
if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
copy = copies.pathcopies(repo[node1], repo[node2])
fm = ui.formatter('status', opts)
fmt = '%s' + end
showchar = not opts.get('no_status')
for state, char, files in changestates:
if state in show:
label = 'status.' + state
for f in files:
fm.startitem()
fm.condwrite(showchar, 'status', '%s ', char, label=label)
fm.write('path', fmt, repo.pathto(f, cwd), label=label)
if f in copy:
fm.write("copy", ' %s' + end, repo.pathto(copy[f], cwd),
label='status.copied')
fm.end()
@command('^summary|sum',
[('', 'remote', None, _('check for push and pull'))], '[--remote]')
def summary(ui, repo, **opts):
"""summarize working directory state
This generates a brief summary of the working directory state,
including parents, branch, commit status, and available updates.
With the --remote option, this will check the default paths for
incoming and outgoing changes. This can be time-consuming.
Returns 0 on success.
"""
ctx = repo[None]
parents = ctx.parents()
pnode = parents[0].node()
marks = []
for p in parents:
# label with log.changeset (instead of log.parent) since this
# shows a working directory parent *changeset*:
# i18n: column positioning for "hg summary"
ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
label='log.changeset changeset.%s' % p.phasestr())
ui.write(' '.join(p.tags()), label='log.tag')
if p.bookmarks():
marks.extend(p.bookmarks())
if p.rev() == -1:
if not len(repo):
ui.write(_(' (empty repository)'))
else:
ui.write(_(' (no revision checked out)'))
ui.write('\n')
if p.description():
ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
label='log.summary')
branch = ctx.branch()
bheads = repo.branchheads(branch)
# i18n: column positioning for "hg summary"
m = _('branch: %s\n') % branch
if branch != 'default':
ui.write(m, label='log.branch')
else:
ui.status(m, label='log.branch')
if marks:
current = repo._bookmarkcurrent
# i18n: column positioning for "hg summary"
ui.write(_('bookmarks:'), label='log.bookmark')
if current is not None:
if current in marks:
ui.write(' *' + current, label='bookmarks.current')
marks.remove(current)
else:
ui.write(' [%s]' % current, label='bookmarks.current')
for m in marks:
ui.write(' ' + m, label='log.bookmark')
ui.write('\n', label='log.bookmark')
st = list(repo.status(unknown=True))[:6]
c = repo.dirstate.copies()
copied, renamed = [], []
for d, s in c.iteritems():
if s in st[2]:
st[2].remove(s)
renamed.append(d)
else:
copied.append(d)
if d in st[1]:
st[1].remove(d)
st.insert(3, renamed)
st.insert(4, copied)
ms = mergemod.mergestate(repo)
st.append([f for f in ms if ms[f] == 'u'])
subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
st.append(subs)
labels = [ui.label(_('%d modified'), 'status.modified'),
ui.label(_('%d added'), 'status.added'),
ui.label(_('%d removed'), 'status.removed'),
ui.label(_('%d renamed'), 'status.copied'),
ui.label(_('%d copied'), 'status.copied'),
ui.label(_('%d deleted'), 'status.deleted'),
ui.label(_('%d unknown'), 'status.unknown'),
ui.label(_('%d ignored'), 'status.ignored'),
ui.label(_('%d unresolved'), 'resolve.unresolved'),
ui.label(_('%d subrepos'), 'status.modified')]
t = []
for s, l in zip(st, labels):
if s:
t.append(l % len(s))
t = ', '.join(t)
cleanworkdir = False
if len(parents) > 1:
t += _(' (merge)')
elif branch != parents[0].branch():
t += _(' (new branch)')
elif (parents[0].closesbranch() and
pnode in repo.branchheads(branch, closed=True)):
t += _(' (head closed)')
elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
t += _(' (clean)')
cleanworkdir = True
elif pnode not in bheads:
t += _(' (new branch head)')
if cleanworkdir:
# i18n: column positioning for "hg summary"
ui.status(_('commit: %s\n') % t.strip())
else:
# i18n: column positioning for "hg summary"
ui.write(_('commit: %s\n') % t.strip())
# all ancestors of branch heads - all ancestors of parent = new csets
new = [0] * len(repo)
cl = repo.changelog
for a in [cl.rev(n) for n in bheads]:
new[a] = 1
for a in cl.ancestors([cl.rev(n) for n in bheads]):
new[a] = 1
for a in [p.rev() for p in parents]:
if a >= 0:
new[a] = 0
for a in cl.ancestors([p.rev() for p in parents]):
new[a] = 0
new = sum(new)
if new == 0:
# i18n: column positioning for "hg summary"
ui.status(_('update: (current)\n'))
elif pnode not in bheads:
# i18n: column positioning for "hg summary"
ui.write(_('update: %d new changesets (update)\n') % new)
else:
# i18n: column positioning for "hg summary"
ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
(new, len(bheads)))
if opts.get('remote'):
t = []
source, branches = hg.parseurl(ui.expandpath('default'))
sbranch = branches[0]
other = hg.peer(repo, {}, source)
revs, checkout = hg.addbranchrevs(repo, other, branches,
opts.get('rev'))
if revs:
revs = [other.lookup(rev) for rev in revs]
ui.debug('comparing with %s\n' % util.hidepassword(source))
repo.ui.pushbuffer()
commoninc = discovery.findcommonincoming(repo, other, heads=revs)
_common, incoming, _rheads = commoninc
repo.ui.popbuffer()
if incoming:
t.append(_('1 or more incoming'))
dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
dbranch = branches[0]
revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
if source != dest:
other = hg.peer(repo, {}, dest)
ui.debug('comparing with %s\n' % util.hidepassword(dest))
if (source != dest or (sbranch is not None and sbranch != dbranch)):
commoninc = None
if revs:
revs = [repo.lookup(rev) for rev in revs]
repo.ui.pushbuffer()
outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs,
commoninc=commoninc)
repo.ui.popbuffer()
o = outgoing.missing
if o:
t.append(_('%d outgoing') % len(o))
if 'bookmarks' in other.listkeys('namespaces'):
lmarks = repo.listkeys('bookmarks')
rmarks = other.listkeys('bookmarks')
diff = set(rmarks) - set(lmarks)
if len(diff) > 0:
t.append(_('%d incoming bookmarks') % len(diff))
diff = set(lmarks) - set(rmarks)
if len(diff) > 0:
t.append(_('%d outgoing bookmarks') % len(diff))
if t:
# i18n: column positioning for "hg summary"
ui.write(_('remote: %s\n') % (', '.join(t)))
else:
# i18n: column positioning for "hg summary"
ui.status(_('remote: (synced)\n'))
@command('tag',
[('f', 'force', None, _('force tag')),
('l', 'local', None, _('make the tag local')),
('r', 'rev', '', _('revision to tag'), _('REV')),
('', 'remove', None, _('remove a tag')),
# -l/--local is already there, commitopts cannot be used
('e', 'edit', None, _('edit commit message')),
('m', 'message', '', _('use <text> as commit message'), _('TEXT')),
] + commitopts2,
_('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
"""add one or more tags for the current or given revision
Name a particular revision using <name>.
Tags are used to name particular revisions of the repository and are
very useful to compare different revisions, to go back to significant
earlier versions or to mark branch points as releases, etc. Changing
an existing tag is normally disallowed; use -f/--force to override.
If no revision is given, the parent of the working directory is
used, or tip if no revision is checked out.
To facilitate version control, distribution, and merging of tags,
they are stored as a file named ".hgtags" which is managed similarly
to other project files and can be hand-edited if necessary. This
also means that tagging creates a new commit. The file
".hg/localtags" is used for local tags (not shared among
repositories).
Tag commits are usually made at the head of a branch. If the parent
of the working directory is not a branch head, :hg:`tag` aborts; use
-f/--force to force the tag commit to be based on a non-head
changeset.
See :hg:`help dates` for a list of formats valid for -d/--date.
Since tag names have priority over branch names during revision
lookup, using an existing branch name as a tag name is discouraged.
Returns 0 on success.
"""
wlock = lock = None
try:
wlock = repo.wlock()
lock = repo.lock()
rev_ = "."
names = [t.strip() for t in (name1,) + names]
if len(names) != len(set(names)):
raise util.Abort(_('tag names must be unique'))
for n in names:
scmutil.checknewlabel(repo, n, 'tag')
if not n:
raise util.Abort(_('tag names cannot consist entirely of '
'whitespace'))
if opts.get('rev') and opts.get('remove'):
raise util.Abort(_("--rev and --remove are incompatible"))
if opts.get('rev'):
rev_ = opts['rev']
message = opts.get('message')
if opts.get('remove'):
expectedtype = opts.get('local') and 'local' or 'global'
for n in names:
if not repo.tagtype(n):
raise util.Abort(_("tag '%s' does not exist") % n)
if repo.tagtype(n) != expectedtype:
if expectedtype == 'global':
raise util.Abort(_("tag '%s' is not a global tag") % n)
else:
raise util.Abort(_("tag '%s' is not a local tag") % n)
rev_ = nullid
if not message:
# we don't translate commit messages
message = 'Removed tag %s' % ', '.join(names)
elif not opts.get('force'):
for n in names:
if n in repo.tags():
raise util.Abort(_("tag '%s' already exists "
"(use -f to force)") % n)
if not opts.get('local'):
p1, p2 = repo.dirstate.parents()
if p2 != nullid:
raise util.Abort(_('uncommitted merge'))
bheads = repo.branchheads()
if not opts.get('force') and bheads and p1 not in bheads:
raise util.Abort(_('not at a branch head (use -f to force)'))
r = scmutil.revsingle(repo, rev_).node()
if not message:
# we don't translate commit messages
message = ('Added tag %s for changeset %s' %
(', '.join(names), short(r)))
date = opts.get('date')
if date:
date = util.parsedate(date)
if opts.get('edit'):
message = ui.edit(message, ui.username())
# don't allow tagging the null rev
if (not opts.get('remove') and
scmutil.revsingle(repo, rev_).rev() == nullrev):
raise util.Abort(_("cannot tag null revision"))
repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
finally:
release(lock, wlock)
@command('tags', [], '')
def tags(ui, repo, **opts):
"""list repository tags
This lists both regular and local tags. When the -v/--verbose
switch is used, a third column "local" is printed for local tags.
Returns 0 on success.
"""
fm = ui.formatter('tags', opts)
hexfunc = ui.debugflag and hex or short
tagtype = ""
for t, n in reversed(repo.tagslist()):
hn = hexfunc(n)
label = 'tags.normal'
tagtype = ''
if repo.tagtype(t) == 'local':
label = 'tags.local'
tagtype = 'local'
fm.startitem()
fm.write('tag', '%s', t, label=label)
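# Pad the tag name to a 30-column field; colwidth() is used instead of
# len() so double-width characters still line up.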
fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
fm.condwrite(not ui.quiet, 'rev id', fmt,
repo.changelog.rev(n), hn, label=label)
fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
tagtype, label=label)
fm.plain('\n')
fm.end()
@command('tip',
[('p', 'patch', None, _('show patch')),
('g', 'git', None, _('use git extended diff format')),
] + templateopts,
_('[-p] [-g]'))
def tip(ui, repo, **opts):
"""show the tip revision
The tip revision (usually just called the tip) is the changeset
most recently added to the repository (and therefore the most
recently changed head).
If you have just made a commit, that commit will be the tip. If
you have just pulled changes from another repository, the tip of
that repository becomes the current tip. The "tip" tag is special
and cannot be renamed or assigned to a different changeset.
Returns 0 on success.
"""
displayer = cmdutil.show_changeset(ui, repo, opts)
displayer.show(repo['tip'])
displayer.close()
@command('unbundle',
[('u', 'update', None,
_('update to new branch head if changesets were unbundled'))],
_('[-u] FILE...'))
def unbundle(ui, repo, fname1, *fnames, **opts):
"""apply one or more changegroup files
Apply one or more compressed changegroup files generated by the
bundle command.
Returns 0 on success, 1 if an update has unresolved files.
"""
fnames = (fname1,) + fnames
lock = repo.lock()
wc = repo['.']
try:
for fname in fnames:
f = hg.openpath(ui, fname)
gen = changegroup.readbundle(f, fname)
modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
finally:
lock.release()
bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
return postincoming(ui, repo, modheads, opts.get('update'), None)
@command('^update|up|checkout|co',
[('C', 'clean', None, _('discard uncommitted changes (no backup)')),
('c', 'check', None,
_('update across branches if no uncommitted changes')),
('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
('r', 'rev', '', _('revision'), _('REV'))],
_('[-c] [-C] [-d DATE] [[-r] REV]'))
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
"""update working directory (or switch revisions)
Update the repository's working directory to the specified
changeset. If no changeset is specified, update to the tip of the
current named branch and move the current bookmark (see :hg:`help
bookmarks`).
Update sets the working directory's parent revision to the specified
changeset (see :hg:`help parents`).
If the changeset is not a descendant or ancestor of the working
directory's parent, the update is aborted. With the -c/--check
option, the working directory is checked for uncommitted changes; if
none are found, the working directory is updated to the specified
changeset.
.. container:: verbose
The following rules apply when the working directory contains
uncommitted changes:
1. If neither -c/--check nor -C/--clean is specified, and if
the requested changeset is an ancestor or descendant of
the working directory's parent, the uncommitted changes
are merged into the requested changeset and the merged
result is left uncommitted. If the requested changeset is
not an ancestor or descendant (that is, it is on another
branch), the update is aborted and the uncommitted changes
are preserved.
2. With the -c/--check option, the update is aborted and the
uncommitted changes are preserved.
3. With the -C/--clean option, uncommitted changes are discarded and
the working directory is updated to the requested changeset.
To cancel an uncommitted merge (and lose your changes), use
:hg:`update --clean .`.
Use null as the changeset to remove the working directory (like
:hg:`clone -U`).
If you want to revert just one file to an older revision, use
:hg:`revert [-r REV] NAME`.
See :hg:`help dates` for a list of formats valid for -d/--date.
Returns 0 on success, 1 if there are unresolved files.
"""
if rev and node:
raise util.Abort(_("please specify just one revision"))
if rev is None or rev == '':
rev = node
# with no argument, we also move the current bookmark, if any
movemarkfrom = None
if rev is None:
curmark = repo._bookmarkcurrent
if bookmarks.iscurrent(repo):
movemarkfrom = repo['.'].node()
elif curmark:
ui.status(_("updating to active bookmark %s\n") % curmark)
rev = curmark
# if we defined a bookmark, we have to remember the original bookmark name
brev = rev
rev = scmutil.revsingle(repo, rev, rev).rev()
if check and clean:
raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))
if date:
if rev is not None:
raise util.Abort(_("you can't specify a revision and a date"))
rev = cmdutil.finddate(ui, repo, date)
if check:
c = repo[None]
if c.dirty(merge=False, branch=False, missing=True):
raise util.Abort(_("uncommitted local changes"))
if rev is None:
rev = repo[repo[None].branch()].rev()
mergemod._checkunknown(repo, repo[None], repo[rev])
if clean:
ret = hg.clean(repo, rev)
else:
ret = hg.update(repo, rev)
if not ret and movemarkfrom:
if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
elif brev in repo._bookmarks:
bookmarks.setcurrent(repo, brev)
elif brev:
bookmarks.unsetcurrent(repo)
return ret
@command('verify', [])
def verify(ui, repo):
"""verify the integrity of the repository
Verify the integrity of the current repository.
This will perform an extensive check of the repository's
integrity, validating the hashes and checksums of each entry in
the changelog, manifest, and tracked files, as well as the
integrity of their crosslinks and indices.
Please see http://mercurial.selenic.com/wiki/RepositoryCorruption
for more information about recovery from corruption of the
repository.
Returns 0 on success, 1 if errors are encountered.
"""
return hg.verify(repo)
@command('version', [])
def version_(ui):
"""output version and copyright information"""
ui.write(_("Mercurial Distributed SCM (version %s)\n")
% util.version())
ui.status(_(
"(see http://mercurial.selenic.com for more information)\n"
"\nCopyright (C) 2005-2012 Matt Mackall and others\n"
"This is free software; see the source for copying conditions. "
"There is NO\nwarranty; "
"not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
))
norepo = ("clone init version help debugcommands debugcomplete"
" debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
" debugknown debuggetbundle debugbundle")
optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
" debugdata debugindex debugindexdot debugrevlog")
inferrepo = ("add addremove annotate cat commit diff grep forget log parents"
" remove resolve status debugwalk")
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/loganalytics/azure-loganalytics/azure/loganalytics/models/query_results_py3.py | 1 | 1126 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class QueryResults(Model):
"""A query response.
Contains the tables, columns & rows resulting from a query.
All required parameters must be populated in order to send to Azure.
:param tables: Required. The list of tables, columns and rows.
:type tables: list[~azure.loganalytics.models.Table]
"""
_validation = {
'tables': {'required': True},
}
_attribute_map = {
'tables': {'key': 'tables', 'type': '[Table]'},
}
def __init__(self, *, tables, **kwargs) -> None:
super(QueryResults, self).__init__(**kwargs)
self.tables = tables
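# A minimal usage sketch (not part of the generated file). It assumes the
# sibling Table model from this package, whose generated constructor takes
# name, columns and rows:
#
#   from azure.loganalytics.models import Table
#
#   table = Table(name='PrimaryResult', columns=[], rows=[])
#   results = QueryResults(tables=[table])
#   wire = results.serialize()  # msrest Model -> JSON-ready dict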
| mit |
kdj0c/onepagepoints | generate_faction.py | 1 | 5402 | #!/usr/bin/env python3
"""
Copyright 2017 Jocelyn Falempe kdj0c@djinvi.net
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import yaml
import argparse
import string
import os
from indentyaml import YamlWeapon, YamlUnit, represent_omap, represent_omap_flow
"""
This scripts helps to generate the yaml files for a faction.
It takes a libreoffice calc ".ods" file, and generate the list
of weapons, and units.
The libreoffice calc file can be done with just copy/paste from the original pdf,
and a few "manual" operation
"""
# Parse the equipment list to find characteristics of individual weapons
def parse_equipment(equipment):
global alljweapons
nested = 0
weapons_raw = []
weapons = []
namestart = 0
for index, char in enumerate(equipment):
if char == '(':
if nested == 0:
start = index
nested += 1
elif char == ')':
nested -= 1
if nested == 0:
weapons_raw.append((equipment[namestart:start].strip(' ,'), equipment[start + 1:index]))
namestart = index + 1
for w in weapons_raw:
name = w[0]
wprange = 0
armorPiercing = 0
attacks = 0
special = []
count = 1
for c in w[1].split(','):
c = c.strip()
if c.endswith('"'):
wprange = int(c[:-1])
elif c.startswith('AP'):
armorPiercing = int(c[3:-1])
elif c.startswith('A') and c[1:].isdigit():
attacks = int(c[1:])
else:
special.append(c.strip())
weapons.append(name)
# remove 2x from the weapon name
firstword = name.split()[0]
if firstword.endswith('x'):
if firstword[:-1].isdigit():
name = name[len(firstword) + 1:]
# remove Linked from weapon name
if name.split()[0] == 'Linked':
if 'Linked' in special:
special.remove('Linked')
name = ' '.join(name.split()[1:])
if name not in alljweapons:
alljweapons[name] = YamlWeapon({'range': wprange, 'attacks': attacks, 'ap': armorPiercing, 'special': special})
return weapons
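# Example (sketch; the weapon names are illustrative): for an equipment cell
# such as 'Assault Rifle (24", A1), CCW (A1)', parse_equipment() returns
# ['Assault Rifle', 'CCW'] and registers each profile once in the global
# alljweapons dict, e.g. {'range': 24, 'attacks': 1, 'ap': 0, 'special': []}
# for the rifle.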
def parse_upgrades(upgrades):
tmp = [up.strip() for up in upgrades.split(',')]
return [up for up in tmp if up != '-']
def parse_special(special):
special = special.split(',')
# strip all whitespace
return [sp.strip() for sp in special]
def parse_units(name, data):
global alljweapons
column_order = ['name', 'count', 'quality', 'defense', 'equipment', 'special', 'upgrades']
alljunits = []
for row in data:
dunit = {}
if len(row) < len(column_order):
continue
for i, col in enumerate(column_order):
if row[i].isnumeric():
dunit[col] = int(row[i])
else:
dunit[col] = row[i]
dunit['equipment'] = parse_equipment(dunit['equipment'])
dunit['special'] = parse_special(dunit['special'])
dunit['upgrades'] = parse_upgrades(dunit['upgrades'])
alljunits.append(YamlUnit(dunit))
with open(name + '.yml', 'w') as f:
yaml.add_representer(YamlUnit, represent_omap)
f.write(yaml.dump(alljunits))
def parse_weapons(data):
for row in data:
parse_equipment(row[0])
def csv_to_list(data):
return [row.split(';') for row in data.split('\n')]
def main():
global alljweapons
parser = argparse.ArgumentParser(description='Parse csv file to help import pdf into yaml')
parser.add_argument('fnames', metavar='fnames', type=str, nargs='+',
help='files to parse')
args = parser.parse_args()
alljweapons = {}
for fname in args.fnames:
with open(fname, 'r') as f:
data = csv_to_list(f.read())
# filename without extension
bname = os.path.basename(fname).split('.')[0]
if bname.startswith('units'):
parse_units(bname, data)
else:
parse_weapons(data)
with open('equipments.yml', 'w') as f:
yaml.add_representer(YamlWeapon, represent_omap_flow)
data = {"weapons": alljweapons,
"wargear": {},
"factionRules": {}}
f.write(yaml.dump(data))
if __name__ == "__main__":
# execute only if run as a script
main()
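# Typical invocation (sketch; file names are illustrative):
#   ./generate_faction.py units_marines.csv weapons_marines.csv
# Files whose basename starts with "units" become <basename>.yml; every weapon
# parsed along the way also feeds the shared equipments.yml written at the end.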
| mit |
alheinecke/tensorflow-xsmm | tensorflow/python/kernel_tests/basic_gpu_test.py | 52 | 8373 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.test_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
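    # Note: np_func is accepted for signature symmetry with the gradient
    # helpers below but is unused here; the assertion above only checks that
    # the CPU and GPU kernels of tf_func agree.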
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
  def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.test_session(use_gpu=True) as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ntts-clo/mld-ryu | ryu/lib/packet/llc.py | 22 | 10664 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logical Link Control (LLC, IEEE 802.2) parser/serializer
http://standards.ieee.org/getieee802/download/802.2-1998.pdf
LLC format
+-----------------+--------------+
| DSAP address | 8 bits |
+-----------------+--------------+
| SSAP address | 8 bits |
+-----------------+--------------+
| Control | 8 or 16 bits |
+-----------------+--------------+
DSAP address field
LSB
+-----+---+---+---+---+---+---+---+
| I/G | D | D | D | D | D | D | D |
+-----+---+---+---+---+---+---+---+
I/G bit = 0 : Individual DSAP
I/G bit = 1 : Group DSAP
D : DSAP address
SSAP address field
LSB
+-----+---+---+---+---+---+---+---+
| C/R | S | S | S | S | S | S | S |
+-----+---+---+---+---+---+---+---+
C/R bit = 0 : Command
C/R bit = 1 : Response
S : SSAP address
Control field
Information transfer
command/response
(I-format PDU)
1 2 3 4 5 6 7 8 9 10-16
+---+---+---+---+---+---+---+---+-----+------+
| 0 | N(S) | P/F | N(R) |
+---+---+---+---+---+---+---+---+-----+------+
Supervisory
commands/responses
(S-format PDUs)
1 2 3 4 5 6 7 8 9 10-16
+---+---+---+---+---+---+---+---+-----+------+
| 1 0 | S S | 0 0 0 0 | P/F | N(R) |
+---+---+---+---+---+---+---+---+-----+------+
Unnumbered
commands/responses
(U-format PDUs)
1 2 3 4 5 6 7 8
+---+---+----+---+-----+---+----+---+
| 1 1 | M1 M1 | P/F | M2 M2 M2 |
+---+---+----+---+-----+---+----+---+
N(S) : sender send sequence number (Bit 2=lower-order-bit)
N(R) : sender receive sequence number (Bit 10=lower-order-bit)
S : supervisory function bit
M1/M2: modifier function bit
P/F : poll bit - command LLC PDUs
final bit - response LLC PDUs
"""
import struct
from . import bpdu
from . import packet_base
from ryu.lib import stringify
SAP_BPDU = 0x42
class llc(packet_base.PacketBase):
"""LLC(IEEE 802.2) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ===============================================
Attribute Description
=============== ===============================================
dsap_addr Destination service access point address field \
includes I/G bit at least significant bit.
ssap_addr Source service access point address field \
includes C/R bit at least significant bit.
control Control field \
[16 bits for formats that include sequence \
numbering, and 8 bits for formats that do not]. \
Either ryu.lib.packet.llc.ControlFormatI or \
ryu.lib.packet.llc.ControlFormatS or \
ryu.lib.packet.llc.ControlFormatU object.
=============== ===============================================
"""
_PACK_STR = '!BB'
_PACK_LEN = struct.calcsize(_PACK_STR)
_CTR_TYPES = {}
_CTR_PACK_STR = '!2xB'
_MIN_LEN = _PACK_LEN
@staticmethod
def register_control_type(register_cls):
llc._CTR_TYPES[register_cls.TYPE] = register_cls
return register_cls
def __init__(self, dsap_addr, ssap_addr, control):
super(llc, self).__init__()
assert getattr(control, 'TYPE', None) in self._CTR_TYPES
self.dsap_addr = dsap_addr
self.ssap_addr = ssap_addr
self.control = control
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(dsap_addr, ssap_addr) = struct.unpack_from(cls._PACK_STR, buf)
(control,) = struct.unpack_from(cls._CTR_PACK_STR, buf)
ctrl = cls._get_control(control)
control, information = ctrl.parser(buf[cls._PACK_LEN:])
return (cls(dsap_addr, ssap_addr, control),
cls.get_packet_type(dsap_addr), information)
def serialize(self, payload, prev):
addr = struct.pack(self._PACK_STR, self.dsap_addr, self.ssap_addr)
control = self.control.serialize()
return addr + control
@classmethod
def _get_control(cls, buf):
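        # The I-format is keyed on the least-significant bit alone (0), while
        # the S-format (0b01) and U-format (0b11) are keyed on the two low bits.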
key = buf & 0b1 if buf & 0b1 == ControlFormatI.TYPE else buf & 0b11
return cls._CTR_TYPES[key]
@llc.register_control_type
class ControlFormatI(stringify.StringifyMixin):
"""LLC sub encoder/decoder class for control I-format field.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
======================== ===============================
Attribute Description
======================== ===============================
send_sequence_number sender send sequence number
pf_bit poll/final bit
receive_sequence_number sender receive sequence number
======================== ===============================
"""
TYPE = 0b0
_PACK_STR = '!H'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, send_sequence_number=0, pf_bit=0,
receive_sequence_number=0):
super(ControlFormatI, self).__init__()
self.send_sequence_number = send_sequence_number
self.pf_bit = pf_bit
self.receive_sequence_number = receive_sequence_number
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(control,) = struct.unpack_from(cls._PACK_STR, buf)
assert (control >> 8) & 0b1 == cls.TYPE
send_sequence_number = (control >> 9) & 0b1111111
pf_bit = (control >> 8) & 0b1
receive_sequence_number = (control >> 1) & 0b1111111
return cls(send_sequence_number, pf_bit,
receive_sequence_number), buf[cls._PACK_LEN:]
def serialize(self):
control = (self.send_sequence_number << 9 |
self.TYPE << 8 |
self.receive_sequence_number << 1 |
self.pf_bit)
return struct.pack(self._PACK_STR, control)
@llc.register_control_type
class ControlFormatS(stringify.StringifyMixin):
"""LLC sub encoder/decoder class for control S-format field.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
======================== ===============================
Attribute Description
======================== ===============================
supervisory_function supervisory function bit
pf_bit poll/final bit
receive_sequence_number sender receive sequence number
======================== ===============================
"""
TYPE = 0b01
_PACK_STR = '!H'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, supervisory_function=0, pf_bit=0,
receive_sequence_number=0):
super(ControlFormatS, self).__init__()
self.supervisory_function = supervisory_function
self.pf_bit = pf_bit
self.receive_sequence_number = receive_sequence_number
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(control,) = struct.unpack_from(cls._PACK_STR, buf)
assert (control >> 8) & 0b11 == cls.TYPE
assert (control >> 12) & 0b1111 == 0
supervisory_function = (control >> 10) & 0b11
pf_bit = (control >> 8) & 0b1
receive_sequence_number = (control >> 1) & 0b1111111
return cls(supervisory_function, pf_bit,
receive_sequence_number), buf[cls._PACK_LEN:]
def serialize(self):
control = (self.supervisory_function << 10 |
self.TYPE << 8 |
self.receive_sequence_number << 1 |
self.pf_bit)
return struct.pack(self._PACK_STR, control)
@llc.register_control_type
class ControlFormatU(stringify.StringifyMixin):
"""LLC sub encoder/decoder class for control U-format field.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
======================== ===============================
Attribute Description
======================== ===============================
modifier_function1 modifier function bit
pf_bit poll/final bit
modifier_function2 modifier function bit
======================== ===============================
"""
TYPE = 0b11
_PACK_STR = '!B'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, modifier_function1=0, pf_bit=0, modifier_function2=0):
super(ControlFormatU, self).__init__()
self.modifier_function1 = modifier_function1
self.pf_bit = pf_bit
self.modifier_function2 = modifier_function2
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(control,) = struct.unpack_from(cls._PACK_STR, buf)
assert control & 0b11 == cls.TYPE
modifier_function1 = (control >> 2) & 0b11
pf_bit = (control >> 4) & 0b1
modifier_function2 = (control >> 5) & 0b111
return cls(modifier_function1, pf_bit,
modifier_function2), buf[cls._PACK_LEN:]
def serialize(self):
control = (self.modifier_function2 << 5 |
self.pf_bit << 4 |
self.modifier_function1 << 2 |
self.TYPE)
return struct.pack(self._PACK_STR, control)
llc.register_packet_type(bpdu.bpdu, SAP_BPDU)
llc.set_classes(llc._CTR_TYPES)
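# A minimal round-trip sketch (not part of the original module; values are
# illustrative). DSAP/SSAP 0x42 with a U-format control byte of 0x03 is the
# classic BPDU encapsulation:
#
#   ctrl = ControlFormatU(modifier_function1=0, pf_bit=0, modifier_function2=0)
#   pdu = llc(dsap_addr=SAP_BPDU, ssap_addr=SAP_BPDU, control=ctrl)
#   buf = pdu.serialize(payload=b'', prev=None)   # -> b'\x42\x42\x03'
#   parsed, next_cls, rest = llc.parser(buf)      # next_cls is bpdu.bpdu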
| apache-2.0 |
yaozongyou/common | third_party/gtest/test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
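# For example, TestFlag('color', 'yes', 'auto') exports GTEST_COLOR=yes and
# expects the helper binary to report 'yes' for the color flag, then unsets
# the variable and expects the built-in default 'auto'.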
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
40223142/cda11 | static/Brython3.1.1-20150328-091302/Lib/site-packages/turtle.py | 619 | 105984 | import math
from javascript import console
from browser import document, html
import _svg
_CFG = {"width" : 0.5, # Screen
"height" : 0.75,
"canvwidth" : 400,
"canvheight": 300,
"leftright": None,
"topbottom": None,
"mode": "standard", # TurtleScreen
"colormode": 1.0,
"delay": 10,
"undobuffersize": 1000, # RawTurtle
"shape": "classic",
"pencolor" : "black",
"fillcolor" : "black",
"resizemode" : "noresize",
"visible" : True,
"language": "english", # docstrings
"exampleturtle": "turtle",
"examplescreen": "screen",
"title": "Python Turtle Graphics",
"using_IDLE": False
}
class Vec2D(tuple):
"""A 2 dimensional vector class, used as a helper class
for implementing turtle graphics.
May be useful for turtle graphics programs also.
Derived from tuple, so a vector is a tuple!
Provides (for a, b vectors, k number):
a+b vector addition
a-b vector subtraction
a*b inner product
k*a and a*k multiplication with scalar
|a| absolute value of a
a.rotate(angle) rotation
"""
def __new__(cls, x, y):
return tuple.__new__(cls, (x, y))
def __add__(self, other):
return Vec2D(self[0]+other[0], self[1]+other[1])
def __mul__(self, other):
if isinstance(other, Vec2D):
return self[0]*other[0]+ self[1]*other[1]
return Vec2D(self[0]*other, self[1]*other)
def __rmul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Vec2D(self[0]*other, self[1]*other)
def __sub__(self, other):
return Vec2D(self[0]-other[0], self[1]-other[1])
def __neg__(self):
return Vec2D(-self[0], -self[1])
def __abs__(self):
return (self[0]**2 + self[1]**2)**0.5
def rotate(self, angle):
"""rotate self counterclockwise by angle
"""
perp = Vec2D(-self[1], self[0])
angle = angle * math.pi / 180.0
c, s = math.cos(angle), math.sin(angle)
return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
def __getnewargs__(self):
return (self[0], self[1])
def __repr__(self):
return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
class _Root:
"""Root class for Screen based on Tkinter."""
def setupcanvas(self, width, height, cwidth, cheight):
self._svg=_svg.svg(Id="mycanvas", width=cwidth, height=cheight)
self._canvas=_svg.g(transform="translate(%d,%d)" % (cwidth//2, cheight//2))
self._svg <= self._canvas
def end(self):
def set_svg():
#have to do this to get animate to work...
document['container'].html=document['container'].html
if "mycanvas" not in document:
document["container"] <= self._svg
from browser import timer
#need this for chrome so that first few draw commands are viewed properly.
timer.set_timeout(set_svg, 1)
def _getcanvas(self):
return self._canvas
def win_width(self):
return self._canvas.width
def win_height(self):
return self._canvas.height
class TurtleScreenBase:
"""Provide the basic graphics functionality.
Interface between Tkinter and turtle.py.
To port turtle.py to some different graphics toolkit
a corresponding TurtleScreenBase class has to be implemented.
"""
#@staticmethod
#def _blankimage():
# """return a blank image object
# """
# pass
#@staticmethod
#def _image(filename):
# """return an image object containing the
# imagedata from a gif-file named filename.
# """
# pass
def __init__(self, cv):
self.cv = cv
self._previous_turtle_attributes={}
self._draw_pos=0
self.canvwidth = cv.width
self.canvheight = cv.height
self.xscale = self.yscale = 1.0
def _createpoly(self):
"""Create an invisible polygon item on canvas self.cv)
"""
#console.log("_createpoly")
pass
def _drawpoly(self, polyitem, coordlist, fill=None,
outline=None, width=None, top=False):
"""Configure polygonitem polyitem according to provided
arguments:
coordlist is sequence of coordinates
fill is filling color
outline is outline color
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawpoly")
pass
def _drawline(self, lineitem, coordlist=None,
fill=None, width=None, top=False):
"""Configure lineitem according to provided arguments:
coordlist is sequence of coordinates
fill is drawing color
width is width of drawn line.
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawline")
#if not isinstance(lineitem, Turtle):
# return
if coordlist is not None:
_x0, _y0=coordlist[0]
_x1, _y1=coordlist[1]
_dist=math.sqrt( (_x0-_x1)*(_x0-_x1) + (_y0-_y1)*(_y0-_y1) )
_dur="%4.2fs" % (0.01*_dist)
if _dur == '0.00s':
_dur='0.1s'
#_dur="%ss" % 1
self._draw_pos+=1
_shape=["%s,%s" % (_x, _y) for _x,_y in lineitem.get_shapepoly()]
if 0:
#if lineitem.isvisible():
if lineitem in self._previous_turtle_attributes:
_previous=self._previous_turtle_attributes[lineitem]
if _previous.heading() != lineitem.heading():
#if self._turtle_heading[lineitem] != lineitem.heading():
_rotate=_previous.heading()
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (_rotate-90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
# we need to rotate our turtle..
_turtle <= _svg.animateTransform(
Id="animateLine%s" % self._draw_pos,
attributeName="transform",
type="rotate",
attributeType="XML",
From=_rotate - 90,
to=lineitem.heading() -90,
dur=_dur,
begin="animateLine%s.end" % (self._draw_pos-1))
_turtle <= _svg.set(attributeName="display",
attributeType="CSS", to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# to="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# begin="animateLine%s.begin" % self._draw_pos,
# end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animate(attributeName="fill",
# From=_previous.fill, to=fill, dur=_dur,
# begin="animateLine%s.begin" % self._draw_pos)
self._draw_pos+=1
self._canvas <= _turtle
_line= _svg.line(x1=_x0*self.xscale, y1=_y0*self.yscale,
x2=_x0*self.xscale, y2=_y0*self.yscale,
style={'stroke': fill, 'stroke-width': width})
_an1=_svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="x2", attributeType="XML",
From=_x0*self.xscale, to=_x1*self.xscale,
dur=_dur, fill='freeze')
_an2=_svg.animate(attributeName="y2", attributeType="XML",
begin="animateLine%s.begin" % self._draw_pos,
                              From=_y0*self.yscale, to=_y1*self.yscale,
dur=_dur, fill='freeze')
# draw turtle
if lineitem.isvisible():
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (lineitem.heading() - 90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
to="%s,%s" % (_x1*self.xscale, _y1*self.yscale),
dur=_dur, begin="animateLine%s.begin" % self._draw_pos)
_turtle <= _svg.set(attributeName="display", attributeType="CSS",
to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
self._canvas <= _turtle
self._previous_turtle_attributes[lineitem]=lineitem
if self._draw_pos == 1:
_an1.setAttribute('begin', "0s")
else:
_an1.setAttribute('begin', "animateLine%s.end" % (self._draw_pos-1))
_line <= _an1
_line <= _an2
self._canvas <= _line
def _delete(self, item):
"""Delete graphics item from canvas.
If item is"all" delete all graphics items.
"""
pass
def _update(self):
"""Redraw graphics items on canvas
"""
pass
def _delay(self, delay):
"""Delay subsequent canvas actions for delay ms."""
pass
def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
return True #fix me
#try:
# rgb = self.cv.winfo_rgb(color)
# ok = True
#except TK.TclError:
# ok = False
#return ok
def _bgcolor(self, color=None):
"""Set canvas' backgroundcolor if color is not None,
else return backgroundcolor."""
if color is not None:
self.cv.style.backgroundColor=color
else:
return self.cv.style.backgroundColor
def _write(self, pos, txt, align, font, pencolor):
"""Write txt at pos in canvas with specified font
and color.
Return text item and x-coord of right bottom corner
of text's bounding box."""
self._draw_pos+=1
_text= _svg.text(txt, x=pos[0], y=pos[1], fill=pencolor,
style={'display': 'none'})
_text <= _svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="display", attributeType="CSS",
From="block", to="block", dur="1s", fill='freeze',
begin="animateLine%s.end" % (self._draw_pos-1))
self._canvas <= _text
return Vec2D(pos[0]+50, pos[1]+50) #fix me
## def _dot(self, pos, size, color):
## """may be implemented for some other graphics toolkit"""
def _createimage(self, image):
"""Create and return image item on canvas.
"""
pass
def _drawimage(self, item, pos, image):
"""Configure image item as to draw image object
at position (x,y) on canvas)
"""
pass
def _setbgpic(self, item, image):
"""Configure image item as to draw image object
at center of canvas. Set item to the first item
in the displaylist, so it will be drawn below
any other item ."""
pass
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
pass
def _resize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on. Does
not alter the drawing window.
"""
self.cv.style.width=canvwidth
self.cv.style.height=canvheight
if bg is not None:
self.cv.style.backgroundColor=bg
def _window_size(self):
""" Return the width and height of the turtle window.
"""
#for now just return canvas width/height
return self.cv.width, self.cv.height
def mainloop(self):
"""Starts event loop - calling Tkinter's mainloop function.
No argument.
Must be last statement in a turtle graphics program.
Must NOT be used if a script is run from within IDLE in -n mode
(No subprocess) - for interactive use of turtle graphics.
Example (for a TurtleScreen instance named screen):
>>> screen.mainloop()
"""
pass
def textinput(self, title, prompt):
"""Pop up a dialog window for input of a string.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what information to input.
Return the string input
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.textinput("NIM", "Name of first player:")
"""
pass
def numinput(self, title, prompt, default=None, minval=None, maxval=None):
"""Pop up a dialog window for input of a number.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what numerical information to input.
default: default value
minval: minimum value for imput
maxval: maximum value for input
The number input must be in the range minval .. maxval if these are
given. If not, a hint is issued and the dialog remains open for
correction. Return the number input.
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
"""
pass
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator (Exception):
"""Will be raised in TurtleScreen.update, if _RUNNING becomes False.
This stops execution of a turtle graphics script.
Main purpose: use in the Demo-Viewer turtle.Demo.py.
"""
pass
class TurtleGraphicsError(Exception):
"""Some TurtleGraphics Error
"""
pass
class Shape:
"""Data structure modeling shapes.
attribute _type is one of "polygon", "image", "compound"
    attribute _data is - depending on _type a polygon-tuple,
an image or a list constructed using the addcomponent method.
"""
def __init__(self, type_, data=None):
self._type = type_
if type_ == "polygon":
if isinstance(data, list):
data = tuple(data)
elif type_ == "image":
if isinstance(data, str):
if data.lower().endswith(".gif") and isfile(data):
data = TurtleScreen._image(data)
# else data assumed to be Photoimage
elif type_ == "compound":
data = []
else:
raise TurtleGraphicsError("There is no shape type %s" % type_)
self._data = data
def addcomponent(self, poly, fill, outline=None):
"""Add component to a shape of type compound.
Arguments: poly is a polygon, i. e. a tuple of number pairs.
fill is the fillcolor of the component,
outline is the outline color of the component.
        call (for a Shape object named s):
-- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue")
Example:
>>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
>>> s = Shape("compound")
>>> s.addcomponent(poly, "red", "blue")
>>> # .. add more components and then use register_shape()
"""
if self._type != "compound":
raise TurtleGraphicsError("Cannot add component to %s Shape"
% self._type)
if outline is None:
outline = fill
self._data.append([poly, fill, outline])
class TurtleScreen(TurtleScreenBase):
"""Provides screen oriented methods like setbg etc.
Only relies upon the methods of TurtleScreenBase and NOT
upon components of the underlying graphics toolkit -
which is Tkinter in this case.
"""
_RUNNING = True
def __init__(self, cv, mode=_CFG["mode"],
colormode=_CFG["colormode"], delay=_CFG["delay"]):
self._shapes = {
"arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
"turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
(-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
(-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
(5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
(2,14))),
"circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
(5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
(-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
(-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
(-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
(5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
"square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
(-10,-10))),
"triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
(-10,-5.77))),
"classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
"blank" : Shape("image", None) #self._blankimage())
}
self._bgpics = {"nopic" : ""}
TurtleScreenBase.__init__(self, cv)
self._mode = mode
self._delayvalue = delay
self._colormode = _CFG["colormode"]
self._keys = []
self.clear()
def clear(self):
"""Delete all drawings and all turtles from the TurtleScreen.
No argument.
Reset empty TurtleScreen to its initial state: white background,
no backgroundimage, no eventbindings and tracing on.
Example (for a TurtleScreen instance named screen):
>>> screen.clear()
Note: this method is not available as function.
"""
self._delayvalue = _CFG["delay"]
self._colormode = _CFG["colormode"]
self._delete("all")
self._bgpic = self._createimage("")
self._bgpicname = "nopic"
self._tracing = 1
self._updatecounter = 0
self._turtles = []
self.bgcolor("white")
#for btn in 1, 2, 3:
# self.onclick(None, btn)
#self.onkeypress(None)
#for key in self._keys[:]:
# self.onkey(None, key)
# self.onkeypress(None, key)
Turtle._pen = None
def mode(self, mode=None):
"""Set turtle-mode ('standard', 'logo' or 'world') and perform reset.
Optional argument:
mode -- on of the strings 'standard', 'logo' or 'world'
Mode 'standard' is compatible with turtle.py.
Mode 'logo' is compatible with most Logo-Turtle-Graphics.
Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in
this mode angles appear distorted if x/y unit-ratio doesn't equal 1.
If mode is not given, return the current mode.
             Mode      Initial turtle heading     positive angles
         ------------|-------------------------|-------------------
          'standard'    to the right (east)       counterclockwise
            'logo'        upward    (north)         clockwise
Examples:
>>> mode('logo') # resets turtle heading to north
>>> mode()
'logo'
"""
if mode is None:
return self._mode
mode = mode.lower()
if mode not in ["standard", "logo", "world"]:
raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
self._mode = mode
if mode in ["standard", "logo"]:
self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2)
self.xscale = self.yscale = 1.0
self.reset()
def setworldcoordinates(self, llx, lly, urx, ury):
"""Set up a user defined coordinate-system.
Arguments:
llx -- a number, x-coordinate of lower left corner of canvas
lly -- a number, y-coordinate of lower left corner of canvas
urx -- a number, x-coordinate of upper right corner of canvas
ury -- a number, y-coordinate of upper right corner of canvas
        Set up a user coordinate system and switch to mode 'world' if necessary.
        This performs a screen.reset. If mode 'world' is already active,
        all drawings are redrawn according to the new coordinates.
        But ATTENTION: in user-defined coordinate systems angles may appear
        distorted. (see Screen.mode())
Example (for a TurtleScreen instance named screen):
>>> screen.setworldcoordinates(-10,-0.5,50,1.5)
>>> for _ in range(36):
... left(10)
... forward(0.5)
"""
if self.mode() != "world":
self.mode("world")
xspan = float(urx - llx)
yspan = float(ury - lly)
wx, wy = self._window_size()
self.screensize(wx-20, wy-20)
oldxscale, oldyscale = self.xscale, self.yscale
self.xscale = self.canvwidth / xspan
self.yscale = self.canvheight / yspan
srx1 = llx * self.xscale
sry1 = -ury * self.yscale
srx2 = self.canvwidth + srx1
sry2 = self.canvheight + sry1
self._setscrollregion(srx1, sry1, srx2, sry2)
self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
#self.update()
def register_shape(self, name, shape=None):
"""Adds a turtle shape to TurtleScreen's shapelist.
Arguments:
(1) name is the name of a gif-file and shape is None.
Installs the corresponding image shape.
!! Image-shapes DO NOT rotate when turning the turtle,
!! so they do not display the heading of the turtle!
(2) name is an arbitrary string and shape is a tuple
of pairs of coordinates. Installs the corresponding
polygon shape
(3) name is an arbitrary string and shape is a
(compound) Shape object. Installs the corresponding
compound shape.
To use a shape, you have to issue the command shape(shapename).
call: register_shape("turtle.gif")
--or: register_shape("tri", ((0,0), (10,10), (-10,10)))
Example (for a TurtleScreen instance named screen):
>>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
"""
if shape is None:
# image
if name.lower().endswith(".gif"):
shape = Shape("image", self._image(name))
else:
raise TurtleGraphicsError("Bad arguments for register_shape.\n"
+ "Use help(register_shape)" )
elif isinstance(shape, tuple):
shape = Shape("polygon", shape)
## else shape assumed to be Shape-instance
self._shapes[name] = shape
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
an error is raised.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
if self._iscolorstring(color) or color == "":
return color
else:
raise TurtleGraphicsError("bad color string: %s" % str(color))
try:
r, g, b = color
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(color))
if self._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(color))
return "#%02x%02x%02x" % (r, g, b)
def _color(self, cstr):
if not cstr.startswith("#"):
return cstr
if len(cstr) == 7:
cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
elif len(cstr) == 4:
cl = [16*int(cstr[h], 16) for h in cstr[1:]]
else:
raise TurtleGraphicsError("bad colorstring: %s" % cstr)
return tuple([c * self._colormode/255 for c in cl])
def colormode(self, cmode=None):
"""Return the colormode or set it to 1.0 or 255.
Optional argument:
cmode -- one of the values 1.0 or 255
r, g, b values of colortriples have to be in range 0..cmode.
Example (for a TurtleScreen instance named screen):
>>> screen.colormode()
1.0
>>> screen.colormode(255)
>>> pencolor(240,160,80)
"""
if cmode is None:
return self._colormode
if cmode == 1.0:
self._colormode = float(cmode)
elif cmode == 255:
self._colormode = int(cmode)
def reset(self):
"""Reset all Turtles on the Screen to their initial state.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.reset()
"""
for turtle in self._turtles:
turtle._setmode(self._mode)
turtle.reset()
def turtles(self):
"""Return the list of turtles on the screen.
Example (for a TurtleScreen instance named screen):
>>> screen.turtles()
[<turtle.Turtle object at 0x00E11FB0>]
"""
return self._turtles
def bgcolor(self, *args):
"""Set or return backgroundcolor of the TurtleScreen.
Arguments (if given): a color string or three numbers
in the range 0..colormode or a 3-tuple of such numbers.
Example (for a TurtleScreen instance named screen):
>>> screen.bgcolor("orange")
>>> screen.bgcolor()
'orange'
>>> screen.bgcolor(0.5,0,0.5)
>>> screen.bgcolor()
'#800080'
"""
if args:
color = self._colorstr(args)
else:
color = None
color = self._bgcolor(color)
if color is not None:
color = self._color(color)
return color
def tracer(self, n=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
        Second argument sets delay value (see RawTurtle.delay())
Example (for a TurtleScreen instance named screen):
>>> screen.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... fd(dist)
... rt(90)
... dist += 2
"""
if n is None:
return self._tracing
self._tracing = int(n)
self._updatecounter = 0
if delay is not None:
self._delayvalue = int(delay)
if self._tracing:
self.update()
def delay(self, delay=None):
""" Return or set the drawing delay in milliseconds.
Optional argument:
delay -- positive integer
Example (for a TurtleScreen instance named screen):
>>> screen.delay(15)
>>> screen.delay()
15
"""
if delay is None:
return self._delayvalue
self._delayvalue = int(delay)
def _incrementudc(self):
"""Increment update counter."""
if not TurtleScreen._RUNNING:
            TurtleScreen._RUNNING = True
raise Terminator
if self._tracing > 0:
self._updatecounter += 1
self._updatecounter %= self._tracing
def update(self):
"""Perform a TurtleScreen update.
"""
return
tracing = self._tracing
self._tracing = True
for t in self.turtles():
#t._update_data()
t._drawturtle()
self._tracing = tracing
self._update()
def window_width(self):
""" Return the width of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_width()
640
"""
return self._window_size()[0]
def window_height(self):
""" Return the height of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_height()
480
"""
return self._window_size()[1]
def getcanvas(self):
"""Return the Canvas of this TurtleScreen.
No argument.
Example (for a Screen instance named screen):
>>> cv = screen.getcanvas()
>>> cv
<turtle.ScrolledCanvas instance at 0x010742D8>
"""
return self.cv
def getshapes(self):
"""Return a list of names of all currently available turtle shapes.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.getshapes()
['arrow', 'blank', 'circle', ... , 'turtle']
"""
return sorted(self._shapes.keys())
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on canvas.
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
num -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen)
>>> screen.onclick(goto)
>>> # Subsequently clicking into the TurtleScreen will
>>> # make the turtle move to the clicked point.
>>> screen.onclick(None)
"""
self._onscreenclick(fun, btn, add)
def onkey(self, fun, key):
"""Bind fun to key-release event of key.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkey(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
the up-arrow key, consequently drawing a hexagon
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key not in self._keys:
self._keys.append(key)
self._onkeyrelease(fun, key)
def onkeypress(self, fun, key=None):
"""Bind fun to key-press event of key if key is given,
or to any key-press-event if no key is given.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkeypress(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
        the up-arrow key, or by keeping the up-arrow key pressed,
        consequently drawing a hexagon.
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key is not None and key not in self._keys:
self._keys.append(key)
self._onkeypress(fun, key)
def listen(self, xdummy=None, ydummy=None):
"""Set focus on TurtleScreen (in order to collect key-events)
No arguments.
Dummy arguments are provided in order
to be able to pass listen to the onclick method.
Example (for a TurtleScreen instance named screen):
>>> screen.listen()
"""
self._listen()
def ontimer(self, fun, t=0):
"""Install a timer, which calls fun after t milliseconds.
Arguments:
fun -- a function with no arguments.
t -- a number >= 0
Example (for a TurtleScreen instance named screen):
>>> running = True
>>> def f():
... if running:
... fd(50)
... lt(60)
... screen.ontimer(f, 250)
...
>>> f() # makes the turtle marching around
>>> running = False
"""
self._ontimer(fun, t)
def bgpic(self, picname=None):
"""Set background image or return name of current backgroundimage.
Optional argument:
picname -- a string, name of a gif-file or "nopic".
If picname is a filename, set the corresponding image as background.
If picname is "nopic", delete backgroundimage, if present.
If picname is None, return the filename of the current backgroundimage.
Example (for a TurtleScreen instance named screen):
>>> screen.bgpic()
'nopic'
>>> screen.bgpic("landscape.gif")
>>> screen.bgpic()
'landscape.gif'
"""
if picname is None:
return self._bgpicname
if picname not in self._bgpics:
self._bgpics[picname] = self._image(picname)
self._setbgpic(self._bgpic, self._bgpics[picname])
self._bgpicname = picname
def screensize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on.
Optional arguments:
canvwidth -- positive integer, new width of canvas in pixels
canvheight -- positive integer, new height of canvas in pixels
bg -- colorstring or color-tuple, new backgroundcolor
If no arguments are given, return current (canvaswidth, canvasheight)
Do not alter the drawing window. To observe hidden parts of
the canvas use the scrollbars. (Can make visible those parts
of a drawing, which were outside the canvas before!)
Example (for a Turtle instance named turtle):
>>> turtle.screensize(2000,1500)
>>> # e.g. to search for an erroneously escaped turtle ;-)
"""
return self._resize(canvwidth, canvheight, bg)
onscreenclick = onclick
resetscreen = reset
clearscreen = clear
addshape = register_shape
onkeyrelease = onkey
class TNavigator:
"""Navigation part of the RawTurtle.
Implements methods for turtle movement.
"""
START_ORIENTATION = {
"standard": Vec2D(1.0, 0.0),
"world" : Vec2D(1.0, 0.0),
"logo" : Vec2D(0.0, 1.0) }
DEFAULT_MODE = "standard"
DEFAULT_ANGLEOFFSET = 0
DEFAULT_ANGLEORIENT = 1
def __init__(self, mode=DEFAULT_MODE):
self._angleOffset = self.DEFAULT_ANGLEOFFSET
self._angleOrient = self.DEFAULT_ANGLEORIENT
self._mode = mode
self.undobuffer = None
self.degrees()
self._mode = None
self._setmode(mode)
TNavigator.reset(self)
def reset(self):
"""reset turtle to its initial values
Will be overwritten by parent class
"""
self._position = Vec2D(0.0, 0.0)
self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
""" Set angle measurement units to degrees.
Optional argument:
fullcircle - a number
Set angle measurement units, i. e. set number
        of 'degrees' for a full circle. Default value is
360 degrees.
Example (for a Turtle instance named turtle):
>>> turtle.left(90)
>>> turtle.heading()
90
Change angle measurement unit to grad (also known as gon,
grade, or gradian and equals 1/100-th of the right angle.)
>>> turtle.degrees(400.0)
>>> turtle.heading()
100
"""
self._setDegreesPerAU(fullcircle)
def radians(self):
""" Set the angle measurement units to radians.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.heading()
90
>>> turtle.radians()
>>> turtle.heading()
1.5707963267948966
"""
self._setDegreesPerAU(2*math.pi)
def _go(self, distance):
"""move turtle forward by specified distance"""
#console.log('_go')
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
#console.log('_rotate')
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
#console.log('_goto')
self._position = end
def forward(self, distance):
"""Move the turtle forward by the specified distance.
Aliases: forward | fd
Argument:
distance -- a number (integer or float)
Move the turtle forward by the specified distance, in the direction
the turtle is headed.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.forward(25)
>>> turtle.position()
(25.00,0.00)
>>> turtle.forward(-75)
>>> turtle.position()
(-50.00,0.00)
"""
self._go(distance)
def back(self, distance):
"""Move the turtle backward by distance.
Aliases: back | backward | bk
Argument:
distance -- a number
        Move the turtle backward by distance, opposite to the direction the
turtle is headed. Do not change the turtle's heading.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.backward(30)
>>> turtle.position()
(-30.00, 0.00)
"""
self._go(-distance)
def right(self, angle):
"""Turn turtle right by angle units.
Aliases: right | rt
Argument:
angle -- a number (integer or float)
Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.right(45)
>>> turtle.heading()
337.0
"""
self._rotate(-angle)
def left(self, angle):
"""Turn turtle left by angle units.
Aliases: left | lt
Argument:
angle -- a number (integer or float)
Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See mode.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.left(45)
>>> turtle.heading()
67.0
"""
self._rotate(angle)
def pos(self):
"""Return the turtle's current location (x,y), as a Vec2D-vector.
Aliases: pos | position
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 240.00)
"""
return self._position
def xcor(self):
""" Return the turtle's x coordinate.
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print(turtle.xcor())
50.0
"""
return self._position[0]
def ycor(self):
""" Return the turtle's y coordinate
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print(turtle.ycor())
86.6025403784
"""
return self._position[1]
def goto(self, x, y=None):
"""Move turtle to an absolute position.
Aliases: setpos | setposition | goto:
Arguments:
x -- a number or a pair/vector of numbers
y -- a number or None
call: goto(x, y) # two coordinates
--or: goto((x, y)) # a pair (tuple) of coordinates
--or: goto(vec) # e.g. as returned by pos()
Move turtle to an absolute position. If the pen is down,
a line will be drawn. The turtle's orientation does not change.
Example (for a Turtle instance named turtle):
>>> tp = turtle.pos()
>>> tp
(0.00, 0.00)
>>> turtle.setpos(60,30)
>>> turtle.pos()
(60.00,30.00)
>>> turtle.setpos((20,80))
>>> turtle.pos()
(20.00,80.00)
>>> turtle.setpos(tp)
>>> turtle.pos()
(0.00,0.00)
"""
if y is None:
self._goto(Vec2D(*x))
else:
self._goto(Vec2D(x, y))
def home(self):
"""Move turtle to the origin - coordinates (0,0).
No arguments.
Move turtle to the origin - coordinates (0,0) and set its
heading to its start-orientation (which depends on mode).
Example (for a Turtle instance named turtle):
>>> turtle.home()
"""
self.goto(0, 0)
self.setheading(0)
def setx(self, x):
"""Set the turtle's first coordinate to x
Argument:
x -- a number (integer or float)
Set the turtle's first coordinate to x, leave second coordinate
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 240.00)
>>> turtle.setx(10)
>>> turtle.position()
(10.00, 240.00)
"""
self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
"""Set the turtle's second coordinate to y
Argument:
y -- a number (integer or float)
Set the turtle's second coordinate to y, first coordinate remains
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 40.00)
>>> turtle.sety(-10)
>>> turtle.position()
(0.00, -10.00)
"""
self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 0.00)
>>> turtle.distance(30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> turtle.distance(pen)
77.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
return abs(pos - self._position)
def towards(self, x, y=None):
"""Return the angle of the line from the turtle's position to (x, y).
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: towards(x, y)         # two coordinates
--or: towards((x, y))       # a pair (tuple) of coordinates
--or: towards(vec)          # e.g. as returned by pos()
--or: towards(mypen)        # where mypen is another turtle
Return the angle, between the line from turtle-position to position
specified by x, y and the turtle's start orientation. (Depends on
modes - "standard" or "logo")
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(10.00, 10.00)
>>> turtle.towards(0,0)
225.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
x, y = pos - self._position
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
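# Math note (added comment): math.atan2(y, x) gives the counterclockwise
# angle from the positive x-axis in radians; it is converted to degrees,
# rounded to 10 decimals, divided by _degreesPerAU to express it in the
# current angle units, then shifted and flipped by _angleOffset and
# _angleOrient so that "logo" mode reports 0 for north, clockwise positive.
# E.g. from (10, 10) towards (0, 0): atan2(-10, -10) = -135 deg, which
# normalizes to 225.0 in standard mode -- the docstring's example value.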
def heading(self):
""" Return the turtle's current heading.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.left(67)
>>> turtle.heading()
67.0
"""
x, y = self._orient
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle.
Aliases: setheading | seth
Argument:
to_angle -- a number (integer or float)
Set the orientation of the turtle to to_angle.
Here are some common directions in degrees:
standard mode:       logo mode:
--------------------|--------------------
  0 - east             0 - north
 90 - north           90 - east
180 - west           180 - south
270 - south          270 - west
Example (for a Turtle instance named turtle):
>>> turtle.setheading(90)
>>> turtle.heading()
90
"""
angle = (to_angle - self.heading())*self._angleOrient
full = self._fullcircle
angle = (angle+full/2.)%full - full/2.
self._rotate(angle)
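# Worked example for the normalization above (added comment):
# (angle + full/2.) % full - full/2. maps any heading difference into
# [-full/2, full/2), so the turtle always takes the shorter turn. With
# full=360, heading=30 and to_angle=350 the raw difference is 320, which
# normalizes to -40: a 40-degree clockwise turn instead of 320 degrees
# counterclockwise.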
def circle(self, radius, extent = None, steps = None):
""" Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
speed = self.speed()
if extent is None:
extent = self._fullcircle
if steps is None:
frac = abs(extent)/self._fullcircle
steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
w = 1.0 * extent / steps
w2 = 0.5 * w
l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
if radius < 0:
l, w, w2 = -l, -w, -w2
tr = self._tracer()
dl = self._delay()
if speed == 0:
self._tracer(0, 0)
else:
self.speed(0)
self._rotate(w2)
for i in range(steps):
self.speed(speed)
self._go(l)
self.speed(0)
self._rotate(w)
self._rotate(-w2)
if speed == 0:
self._tracer(tr, dl)
self.speed(speed)
if self.undobuffer:
self.undobuffer.cumulate = False
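# Geometry note (added comment): the arc is approximated by a regular
# polygon; each step turns by w = extent/steps and advances along a chord
# of length l = 2*radius*sin(w/2). For radius=50 and a full default circle,
# steps = 1 + int(min(11 + 50/6.0, 59.0)) = 20, so each chord spans
# 18 degrees and measures 2*50*sin(9 deg) ~= 15.6 units.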
## three dummy methods to be implemented by child class:
def speed(self, s=0):
"""dummy method - to be overwritten by child class"""
def _tracer(self, a=None, b=None):
"""dummy method - to be overwritten by child class"""
def _delay(self, n=None):
"""dummy method - to be overwritten by child class"""
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen:
"""Drawing part of the RawTurtle.
Implements drawing properties.
"""
def __init__(self, resizemode=_CFG["resizemode"]):
self._resizemode = resizemode # or "user" or "noresize"
self.undobuffer = None
TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
fillcolor=_CFG["fillcolor"]):
self._pensize = 1
self._shown = True
self._pencolor = pencolor
self._fillcolor = fillcolor
self._drawing = True
self._speed = 3
self._stretchfactor = (1., 1.)
self._shearfactor = 0.
self._tilt = 0.
self._shapetrafo = (1., 0., 0., 1.)
self._outlinewidth = 1
def resizemode(self, rmode=None):
"""Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize" no adaption of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize'
"""
if rmode is None:
return self._resizemode
rmode = rmode.lower()
if rmode in ["auto", "user", "noresize"]:
self.pen(resizemode=rmode)
def pensize(self, width=None):
"""Set or return the line thickness.
Aliases: pensize | width
Argument:
width -- positive number
Set the line thickness to width or return it. If resizemode is set
to "auto" and turtleshape is a polygon, that polygon is drawn with
the same line thickness. If no argument is given, current pensize
is returned.
Example (for a Turtle instance named turtle):
>>> turtle.pensize()
1
>>> turtle.pensize(10) # from here on lines of width 10 are drawn
"""
if width is None:
return self._pensize
self.pen(pensize=width)
def penup(self):
"""Pull the pen up -- no drawing when moving.
Aliases: penup | pu | up
No argument
Example (for a Turtle instance named turtle):
>>> turtle.penup()
"""
if not self._drawing:
return
self.pen(pendown=False)
def pendown(self):
"""Pull the pen down -- drawing when moving.
Aliases: pendown | pd | down
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.pendown()
"""
if self._drawing:
return
self.pen(pendown=True)
def isdown(self):
"""Return True if pen is down, False if it's up.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.penup()
>>> turtle.isdown()
False
>>> turtle.pendown()
>>> turtle.isdown()
True
"""
return self._drawing
def speed(self, speed=None):
""" Return or set the turtle's speed.
Optional argument:
speed -- an integer in the range 0..10 or a speedstring (see below)
Set the turtle's speed to an integer value in the range 0 .. 10.
If no argument is given: return current speed.
If input is a number greater than 10 or smaller than 0.5,
speed is set to 0.
Speedstrings are mapped to speedvalues in the following way:
'fastest' : 0
'fast' : 10
'normal' : 6
'slow' : 3
'slowest' : 1
speeds from 1 to 10 enforce increasingly faster animation of
line drawing and turtle turning.
Attention:
speed = 0 : *no* animation takes place. forward/back makes turtle jump
and likewise left/right make the turtle turn instantly.
Example (for a Turtle instance named turtle):
>>> turtle.speed(3)
"""
speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
if speed is None:
return self._speed
if speed in speeds:
speed = speeds[speed]
elif 0.5 < speed < 10.5:
speed = int(round(speed))
else:
speed = 0
self.pen(speed=speed)
def color(self, *args):
"""Return or set the pencolor and fillcolor.
Arguments:
Several input formats are allowed.
They use 0, 1, 2, or 3 arguments as follows:
color()
Return the current pencolor and the current fillcolor
as a pair of color specification strings as are returned
by pencolor and fillcolor.
color(colorstring), color((r,g,b)), color(r,g,b)
inputs as in pencolor, set both fillcolor and pencolor
to the given value.
color(colorstring1, colorstring2),
color((r1,g1,b1), (r2,g2,b2))
equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
and analogously, if the other input format is used.
If turtleshape is a polygon, outline and interior of that polygon
is drawn with the newly set colors.
For more info see: pencolor, fillcolor
Example (for a Turtle instance named turtle):
>>> turtle.color('red', 'green')
>>> turtle.color()
('red', 'green')
>>> colormode(255)
>>> color((40, 80, 120), (160, 200, 240))
>>> color()
('#285078', '#a0c8f0')
"""
if args:
l = len(args)
if l == 1:
pcolor = fcolor = args[0]
elif l == 2:
pcolor, fcolor = args
elif l == 3:
pcolor = fcolor = args
pcolor = self._colorstr(pcolor)
fcolor = self._colorstr(fcolor)
self.pen(pencolor=pcolor, fillcolor=fcolor)
else:
return self._color(self._pencolor), self._color(self._fillcolor)
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
def fillcolor(self, *args):
""" Return or set the fillcolor.
Arguments:
Four input formats are allowed:
- fillcolor()
Return the current fillcolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- fillcolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- fillcolor((r, g, b))
*a tuple* of r, g, and b, which represent an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- fillcolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the interior of that polygon is drawn
with the newly set fillcolor.
Example (for a Turtle instance named turtle):
>>> turtle.fillcolor('violet')
>>> col = turtle.pencolor()
>>> turtle.fillcolor(col)
>>> turtle.fillcolor(0, .5, 0)
"""
if args:
color = self._colorstr(args)
if color == self._fillcolor:
return
self.pen(fillcolor=color)
else:
return self._color(self._fillcolor)
def showturtle(self):
"""Makes the turtle visible.
Aliases: showturtle | st
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> turtle.showturtle()
"""
self.pen(shown=True)
def hideturtle(self):
"""Makes the turtle invisible.
Aliases: hideturtle | ht
No argument.
It's a good idea to do this while you're in the
middle of a complicated drawing, because hiding
the turtle speeds up the drawing observably.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
"""
self.pen(shown=False)
def isvisible(self):
"""Return True if the Turtle is shown, False if it's hidden.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> print(turtle.isvisible())
False
"""
return self._shown
def pen(self, pen=None, **pendict):
"""Return or set the pen's attributes.
Arguments:
pen -- a dictionary with some or all of the below listed keys.
**pendict -- one or more keyword-arguments with the below
listed keys as keywords.
Return or set the pen's attributes in a 'pen-dictionary'
with the following key/value pairs:
"shown" : True/False
"pendown" : True/False
"pencolor" : color-string or color-tuple
"fillcolor" : color-string or color-tuple
"pensize" : positive number
"speed" : number in range 0..10
"resizemode" : "auto" or "user" or "noresize"
"stretchfactor": (positive number, positive number)
"shearfactor": number
"outline" : positive number
"tilt" : number
This dictionary can be used as argument for a subsequent
pen()-call to restore the former pen-state. Moreover one
or more of these attributes can be provided as keyword-arguments.
This can be used to set several pen attributes in one statement.
Examples (for a Turtle instance named turtle):
>>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> penstate=turtle.pen()
>>> turtle.color("yellow","")
>>> turtle.penup()
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> turtle.pen(penstate, fillcolor="green")
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
"""
_pd = {"shown" : self._shown,
"pendown" : self._drawing,
"pencolor" : self._pencolor,
"fillcolor" : self._fillcolor,
"pensize" : self._pensize,
"speed" : self._speed,
"resizemode" : self._resizemode,
"stretchfactor" : self._stretchfactor,
"shearfactor" : self._shearfactor,
"outline" : self._outlinewidth,
"tilt" : self._tilt
}
#console.log('pen')
if not (pen or pendict):
return _pd
if isinstance(pen, dict):
p = pen
else:
p = {}
p.update(pendict)
_p_buf = {}
for key in p:
_p_buf[key] = _pd[key]
if self.undobuffer:
self.undobuffer.push(("pen", _p_buf))
newLine = False
if "pendown" in p:
if self._drawing != p["pendown"]:
newLine = True
if "pencolor" in p:
if isinstance(p["pencolor"], tuple):
p["pencolor"] = self._colorstr((p["pencolor"],))
if self._pencolor != p["pencolor"]:
newLine = True
if "pensize" in p:
if self._pensize != p["pensize"]:
newLine = True
if newLine:
self._newLine()
if "pendown" in p:
self._drawing = p["pendown"]
if "pencolor" in p:
self._pencolor = p["pencolor"]
if "pensize" in p:
self._pensize = p["pensize"]
if "fillcolor" in p:
if isinstance(p["fillcolor"], tuple):
p["fillcolor"] = self._colorstr((p["fillcolor"],))
self._fillcolor = p["fillcolor"]
if "speed" in p:
self._speed = p["speed"]
if "resizemode" in p:
self._resizemode = p["resizemode"]
if "stretchfactor" in p:
sf = p["stretchfactor"]
if isinstance(sf, (int, float)):
sf = (sf, sf)
self._stretchfactor = sf
if "shearfactor" in p:
self._shearfactor = p["shearfactor"]
if "outline" in p:
self._outlinewidth = p["outline"]
if "shown" in p:
self._shown = p["shown"]
if "tilt" in p:
self._tilt = p["tilt"]
if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
scx, scy = self._stretchfactor
shf = self._shearfactor
sa, ca = math.sin(self._tilt), math.cos(self._tilt)
self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
-scx*sa, scy*(ca - shf*sa))
self._update()
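# Matrix note (added comment): the _shapetrafo computed above composes
# stretch, shear and tilt into a single 2x2 matrix applied to shape
# coordinates:
#
#     | scx*ca    scy*(shf*ca + sa) |
#     | -scx*sa   scy*(ca - shf*sa) |
#
# i.e. rotation by the tilt angle combined with shearing along the heading
# and scaling by the stretchfactors; shapetransform() in RawTurtle performs
# the inverse decomposition.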
## three dummy methods to be implemented by child class:
def _newLine(self, usePos = True):
"""dummy method - to be overwritten by child class"""
def _update(self, count=True, forced=False):
"""dummy method - to be overwritten by child class"""
def _color(self, args):
"""dummy method - to be overwritten by child class"""
def _colorstr(self, args):
"""dummy method - to be overwritten by child class"""
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage:
"""Helper class: Datatype to store Turtle attributes
"""
def __init__(self, screen, shapeIndex):
self.screen = screen
self._type = None
self._setshape(shapeIndex)
def _setshape(self, shapeIndex):
#console.log("_setshape", self._type)
screen = self.screen
self.shapeIndex = shapeIndex
if self._type == "polygon" == screen._shapes[shapeIndex]._type:
return
if self._type == "image" == screen._shapes[shapeIndex]._type:
return
if self._type in ["image", "polygon"]:
screen._delete(self._item)
elif self._type == "compound":
for item in self._item:
screen._delete(item)
self._type = screen._shapes[shapeIndex]._type
return
#console.log(self._type)
if self._type == "polygon":
self._item = screen._createpoly()
elif self._type == "image":
self._item = screen._createimage(screen._shapes["blank"]._data)
elif self._type == "compound":
self._item = [screen._createpoly() for item in
screen._shapes[shapeIndex]._data]
#console.log(self._item)
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
def __init__(self, canvas=None,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if isinstance(canvas, _Screen):
self.screen = canvas
elif isinstance(canvas, TurtleScreen):
if canvas not in RawTurtle.screens:
RawTurtle.screens.append(canvas)
self.screen = canvas
#elif isinstance(canvas, (ScrolledCanvas, Canvas)):
# for screen in RawTurtle.screens:
# if screen.cv == canvas:
# self.screen = screen
# break
# else:
# self.screen = TurtleScreen(canvas)
# RawTurtle.screens.append(self.screen)
else:
raise TurtleGraphicsError("bad canvas argument %s" % canvas)
screen = self.screen
TNavigator.__init__(self, screen.mode())
TPen.__init__(self)
screen._turtles.append(self)
#self.drawingLineItem = screen._createline()
self.turtle = _TurtleImage(screen, shape)
self._poly = None
self._creatingPoly = False
self._fillitem = self._fillpath = None
self._shown = visible
self._hidden_from_screen = False
#self.currentLineItem = screen._createline()
self.currentLine = [self._position]
#self.items = [] #[self.currentLineItem]
self.stampItems = []
self._undobuffersize = undobuffersize
self.undobuffer = None #Tbuffer(undobuffersize)
#self._update()
def reset(self):
"""Delete the turtle's drawings and restore its default values.
No argument.
Delete the turtle's drawings from the screen, re-center the turtle
and set variables to the default values.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00,-22.00)
>>> turtle.heading()
100.0
>>> turtle.reset()
>>> turtle.position()
(0.00,0.00)
>>> turtle.heading()
0.0
"""
TNavigator.reset(self)
TPen._reset(self)
self._clear()
self._drawturtle()
#self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer, an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
... undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
#for item in self.items:
# self.screen._delete(item)
#self.currentLineItem = #self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
#self.items = [self.currentLineItem]
self.clearstamps()
#self.setundobuffer(self._undobuffersize)
def clear(self):
"""Delete the turtle's drawings from the screen. Do not move turtle.
No arguments.
Delete the turtle's drawings from the screen. Do not move turtle.
State and position of the turtle as well as drawings of other
turtles are not affected.
Examples (for a Turtle instance named turtle):
>>> turtle.clear()
"""
self._clear()
#self._update()
#def _update_data(self):
# self.screen._incrementudc()
# if self.screen._updatecounter != 0:
# return
# if len(self.currentLine)>1:
# self.screen._drawline(self.currentLineItem, self.currentLine,
# self._pencolor, self._pensize)
def _update(self):
"""Perform a Turtle-data update.
"""
return
screen = self.screen
if screen._tracing == 0:
return
elif screen._tracing == 1:
#self._update_data()
self._drawturtle()
#screen._update() # TurtleScreenBase
#screen._delay(screen._delayvalue) # TurtleScreenBase
else:
#self._update_data()
if screen._updatecounter == 0:
for t in screen.turtles():
t._drawturtle()
#screen._update()
def _tracer(self, flag=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second argument sets delay value (see RawTurtle.delay())
Example (for a Turtle instance named turtle):
>>> turtle.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... turtle.fd(dist)
... turtle.rt(90)
... dist += 2
"""
return self.screen.tracer(flag, delay)
def _color(self, args):
return self.screen._color(args)
def _colorstr(self, args):
return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except (TypeError, ValueError):
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if not name in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
#self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of the turtle's orientation.
outline determines the width of the shape's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i. e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> turtle.stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> turtle.stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i. e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
def tilt(self, angle):
"""Rotate the turtleshape by angle.
Argument:
angle - a number
Rotate the turtleshape by angle from its current tilt-angle,
but do NOT change the turtle's heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(30)
>>> turtle.fd(50)
>>> turtle.tilt(30)
>>> turtle.fd(50)
"""
self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, t22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
#console.log("shapetransform")
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
# test the merged matrix elements: the t-arguments may be None here
if m11 * m22 - m12 * m21 == 0:
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self._update()
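# Decomposition sketch (added comment): for M = [[m11, m12], [m21, m22]]
# the tilt is alfa = atan2(-m21, m11); rotating M back by -alfa yields an
# effectively upper-triangular matrix [[a11, a12], [a21, a22]] with
# a21 ~= 0, whose diagonal holds the stretchfactors and whose ratio
# a12/a22 is the shearfactor -- inverting the composition built in
# TPen.pen().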
def _polytrafo(self, poly):
"""Computes transformed polygon shapes from a shape
according to current position and heading.
"""
screen = self.screen
p0, p1 = self._position
e0, e1 = self._orient
e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
e0, e1 = (1.0 / abs(e)) * e
return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
for (x, y) in poly]
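# Transform note (added comment): (e0, e1) is the heading vector rescaled
# for the screen's aspect ratio and normalized; (e1*x + e0*y, -e0*x + e1*y)
# rotates the shape's local coordinates to align with the heading, and the
# result is divided by xscale/yscale and translated to the current
# position (p0, p1).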
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
def _drawturtle(self):
"""Manages the correct rendering of the turtle with respect to
its shape, resizemode, stretch and tilt etc."""
return
############################## stamp stuff ###############################
def stamp(self):
"""Stamp a copy of the turtleshape onto the canvas and return its id.
No argument.
Stamp a copy of the turtle shape onto the canvas at the current
turtle position. Return a stamp_id for that stamp, which can be
used to delete it by calling clearstamp(stamp_id).
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> turtle.stamp()
13
>>> turtle.fd(50)
"""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
tshape = shape._data
if ttype == "polygon":
stitem = screen._createpoly()
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(stitem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
stitem = screen._createimage("")
screen._drawimage(stitem, self._position, tshape)
elif ttype == "compound":
stitem = []
for element in tshape:
item = screen._createpoly()
stitem.append(item)
stitem = tuple(stitem)
for item, (poly, fc, oc) in zip(stitem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
self.stampItems.append(stitem)
self.undobuffer.push(("stamp", stitem))
return stitem
def _clearstamp(self, stampid):
"""does the work for clearstamp() and clearstamps()
"""
if stampid in self.stampItems:
if isinstance(stampid, tuple):
for subitem in stampid:
self.screen._delete(subitem)
else:
self.screen._delete(stampid)
self.stampItems.remove(stampid)
# Delete stampitem from undobuffer if necessary
# if clearstamp is called directly.
item = ("stamp", stampid)
buf = self.undobuffer
if buf is None or item not in buf.buffer:
return
index = buf.buffer.index(item)
buf.buffer.remove(item)
if index <= buf.ptr:
buf.ptr = (buf.ptr - 1) % buf.bufsize
buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
def clearstamp(self, stampid):
"""Delete stamp with given stampid
Argument:
stampid - an integer, must be return value of previous stamp() call.
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> astamp = turtle.stamp()
>>> turtle.fd(50)
>>> turtle.clearstamp(astamp)
"""
self._clearstamp(stampid)
self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
... turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
def _goto(self, end):
"""Move the pen to the point end, thereby drawing a line
if pen is down. All other methods for turtle movement depend
on this one.
"""
if self._speed and self.screen._tracing == 1:
if self._drawing:
#console.log('%s:%s:%s:%s:%s' % (self, start, end, self._pencolor,
# self._pensize))
self.screen._drawline(self, #please remove me eventually
(self._position, end),
self._pencolor, self._pensize, False)
if isinstance(self._fillpath, list):
self._fillpath.append(end)
###### inheritance!
self._position = end
def _rotate(self, angle):
"""Turns pen clockwise by angle.
"""
#console.log('_rotate')
if self.undobuffer:
self.undobuffer.push(("rot", angle, self._degreesPerAU))
angle *= self._degreesPerAU
neworient = self._orient.rotate(angle)
tracing = self.screen._tracing
self._orient = neworient
#self._update()
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if the current line becomes too long, animation
performance (via _drawline) slows down considerably.
"""
#console.log('_newLine')
return
def filling(self):
"""Return fillstate (True if filling, False else).
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> if turtle.filling():
... turtle.pensize(5)
... else:
... turtle.pensize(3)
"""
return isinstance(self._fillpath, list)
def begin_fill(self):
"""Called just before drawing a shape to be filled.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if not self.filling():
self._fillitem = self.screen._createpoly()
#self.items.append(self._fillitem)
self._fillpath = [self._position]
#self._newLine()
if self.undobuffer:
self.undobuffer.push(("beginfill", self._fillitem))
#self._update()
def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if self.filling():
if len(self._fillpath) > 2:
self.screen._drawpoly(self._fillitem, self._fillpath,
fill=self._fillcolor)
if self.undobuffer:
self.undobuffer.push(("dofill", self._fillitem))
self._fillitem = self._fillpath = None
self._update()
def dot(self, size=None, *color):
"""Draw a dot with diameter size, using color.
Optional arguments:
size -- an integer >= 1 (if given)
color -- a colorstring or a numeric color tuple
Draw a circular dot with diameter size, using color.
If size is not given, the maximum of pensize+4 and 2*pensize is used.
Example (for a Turtle instance named turtle):
>>> turtle.dot()
>>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
"""
if not color:
if isinstance(size, (str, tuple)):
color = self._colorstr(size)
size = self._pensize + max(self._pensize, 4)
else:
color = self._pencolor
if not size:
size = self._pensize + max(self._pensize, 4)
else:
if size is None:
size = self._pensize + max(self._pensize, 4)
color = self._colorstr(color)
if hasattr(self.screen, "_dot"):
item = self.screen._dot(self._position, size, color)
#self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("dot", item))
else:
pen = self.pen()
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
try:
if self.resizemode() == 'auto':
self.ht()
self.pendown()
self.pensize(size)
self.pencolor(color)
self.forward(0)
finally:
self.pen(pen)
if self.undobuffer:
self.undobuffer.cumulate = False
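# Sizing note (added comment): with no explicit size the dot diameter is
# self._pensize + max(self._pensize, 4), i.e. pensize + 4 for thin pens
# (pensize 3 -> dot 7) and 2*pensize once pensize exceeds 4
# (pensize 10 -> dot 20) -- the "maximum of pensize+4 and 2*pensize"
# described in the docstring.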
def _write(self, txt, align, font):
"""Performs the writing for write()
"""
item, end = self.screen._write(self._position, txt, align, font,
self._pencolor)
#self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("wri", item))
return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
"""Write text at the current turtle position.
Arguments:
arg -- info, which is to be written to the TurtleScreen
move (optional) -- True/False
align (optional) -- one of the strings "left", "center" or "right"
font (optional) -- a triple (fontname, fontsize, fonttype)
Write text - the string representation of arg - at the current
turtle position according to align ("left", "center" or "right")
and with the given font.
If move is True, the pen is moved to the bottom-right corner
of the text. By default, move is False.
Example (for a Turtle instance named turtle):
>>> turtle.write('Home = ', True, align="center")
>>> turtle.write((0,0), True)
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
end = self._write(str(arg), align.lower(), font)
if move:
x, y = self.pos()
self.setpos(end, y)
if self.undobuffer:
self.undobuffer.cumulate = False
def begin_poly(self):
"""Start recording the vertices of a polygon.
No argument.
Start recording the vertices of a polygon. Current turtle position
is first point of polygon.
Example (for a Turtle instance named turtle):
>>> turtle.begin_poly()
"""
self._poly = [self._position]
self._creatingPoly = True
def end_poly(self):
"""Stop recording the vertices of a polygon.
No argument.
Stop recording the vertices of a polygon. Current turtle position is
last point of polygon. This will be connected with the first point.
Example (for a Turtle instance named turtle):
>>> turtle.end_poly()
"""
self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## check if there is any poly?
if self._poly is not None:
return tuple(self._poly)
def getscreen(self):
"""Return the TurtleScreen object, the turtle is drawing on.
No argument.
Return the TurtleScreen object, the turtle is drawing on.
So TurtleScreen-methods can be called for that object.
Example (for a Turtle instance named turtle):
>>> ts = turtle.getscreen()
>>> ts
<turtle.TurtleScreen object at 0x0106B770>
>>> ts.bgcolor("pink")
"""
return self.screen
def getturtle(self):
"""Return the Turtleobject itself.
No argument.
Only reasonable use: as a function to return the 'anonymous turtle':
Example:
>>> pet = getturtle()
>>> pet.fd(50)
>>> pet
<turtle.Turtle object at 0x0187D810>
>>> turtles()
[<turtle.Turtle object at 0x0187D810>]
"""
return self
getpen = getturtle
################################################################
### screen oriented methods recurring to methods of TurtleScreen
################################################################
def _delay(self, delay=None):
"""Set delay value which determines speed of turtle animation.
"""
return self.screen.delay(delay)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
"""Return the singleton screen object.
If none exists at the moment, create a new one and return it,
else return the existing one."""
if Turtle._screen is None:
Turtle._screen = _Screen()
return Turtle._screen
class _Screen(TurtleScreen):
_root = None
_canvas = None
_title = _CFG["title"]
def __init__(self):
# XXX there is no need for this code to be conditional,
# as there will be only a single _Screen instance, anyway
# XXX actually, the turtle demo is injecting root window,
# so perhaps the conditional creation of a root should be
# preserved (perhaps by passing it as an optional parameter)
if _Screen._root is None:
_Screen._root = self._root = _Root()
#self._root.title(_Screen._title)
#self._root.ondestroy(self._destroy)
if _Screen._canvas is None:
width = _CFG["width"]
height = _CFG["height"]
canvwidth = _CFG["canvwidth"]
canvheight = _CFG["canvheight"]
leftright = _CFG["leftright"]
topbottom = _CFG["topbottom"]
self._root.setupcanvas(width, height, canvwidth, canvheight)
_Screen._canvas = self._root._getcanvas()
TurtleScreen.__init__(self, _Screen._canvas)
self.setup(width, height, leftright, topbottom)
def end(self):
self._root.end()
def setup(self, width=_CFG["width"], height=_CFG["height"],
startx=_CFG["leftright"], starty=_CFG["topbottom"]):
""" Set the size and position of the main window.
Arguments:
width: as integer a size in pixels, as float a fraction of the screen.
Default is 50% of screen.
height: as integer the height in pixels, as float a fraction of the
screen. Default is 75% of screen.
startx: if positive, starting position in pixels from the left
edge of the screen, if negative from the right edge
Default, startx=None is to center window horizontally.
starty: if positive, starting position in pixels from the top
edge of the screen, if negative from the bottom edge
Default, starty=None is to center window vertically.
Examples (for a Screen instance named screen):
>>> screen.setup (width=200, height=200, startx=0, starty=0)
sets window to 200x200 pixels, in upper left of screen
>>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
sets window to 75% of screen by 50% of screen and centers
"""
if not hasattr(self._root, "set_geometry"):
return
sw = self._root.win_width()
sh = self._root.win_height()
if isinstance(width, float) and 0 <= width <= 1:
width = sw*width
if startx is None:
startx = (sw - width) / 2
if isinstance(height, float) and 0 <= height <= 1:
height = sh*height
if starty is None:
starty = (sh - height) / 2
self._root.set_geometry(width, height, startx, starty)
self.update()
class Turtle(RawTurtle):
"""RawTurtle auto-creating (scrolled) canvas.
When a Turtle object is created or a function derived from some
Turtle method is called, a TurtleScreen object is automatically created.
"""
_pen = None
_screen = None
def __init__(self,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if Turtle._screen is None:
Turtle._screen = Screen()
RawTurtle.__init__(self, Turtle._screen,
shape=shape,
undobuffersize=undobuffersize,
visible=visible)
Pen = Turtle
def _getpen():
"""Create the 'anonymous' turtle if not already present."""
if Turtle._pen is None:
Turtle._pen = Turtle()
return Turtle._pen
def _getscreen():
"""Create a TurtleScreen if not already present."""
if Turtle._screen is None:
Turtle._screen = Screen()
return Turtle._screen
if __name__ == "__main__":
def switchpen():
if isdown():
pu()
else:
pd()
def demo1():
"""Demo of old turtle.py - module"""
reset()
tracer(True)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
begin_fill()
for _ in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
end_fill()
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(False)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
tracer(True)
begin_fill()
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
end_fill()
# more text
def demo2():
"""Demo of some new features."""
speed(1)
st()
pensize(3)
setheading(towards(0, 0))
radius = distance(0, 0)/2.0
rt(90)
for _ in range(18):
switchpen()
circle(radius, 10)
write("wait a moment...")
while undobufferentries():
undo()
reset()
lt(90)
colormode(255)
laenge = 10
pencolor("green")
pensize(3)
lt(180)
for i in range(-2, 16):
if i > 0:
begin_fill()
fillcolor(255-15*i, 0, 15*i)
for _ in range(3):
fd(laenge)
lt(120)
end_fill()
laenge += 10
lt(15)
speed((speed()+1)%12)
#end_fill()
lt(120)
pu()
fd(70)
rt(30)
pd()
color("red","yellow")
speed(0)
begin_fill()
for _ in range(4):
circle(50, 90)
rt(90)
fd(30)
rt(90)
end_fill()
lt(90)
pu()
fd(30)
pd()
shape("turtle")
tri = getturtle()
tri.resizemode("auto")
turtle = Turtle()
turtle.resizemode("auto")
turtle.shape("turtle")
turtle.reset()
turtle.left(90)
turtle.speed(0)
turtle.up()
turtle.goto(280, 40)
turtle.lt(30)
turtle.down()
turtle.speed(6)
turtle.color("blue","orange")
turtle.pensize(2)
tri.speed(6)
setheading(towards(turtle))
count = 1
while tri.distance(turtle) > 4:
turtle.fd(3.5)
turtle.lt(0.6)
tri.setheading(tri.towards(turtle))
tri.fd(4)
if count % 20 == 0:
turtle.stamp()
tri.stamp()
switchpen()
count += 1
tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
tri.pencolor("black")
tri.pencolor("red")
def baba(xdummy, ydummy):
clearscreen()
bye()
time.sleep(2)
while undobufferentries():
tri.undo()
turtle.undo()
tri.fd(50)
tri.write(" Click me!", font = ("Courier", 12, "bold") )
tri.onclick(baba, 1)
demo1()
demo2()
exitonclick()
| gpl-3.0 |
Tesora-Release/tesora-trove | trove/flavor/views.py | 5 | 1903 | # Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import cfg
from trove.common.views import create_links
CONF = cfg.CONF
class FlavorView(object):
def __init__(self, flavor, req=None):
self.flavor = flavor
self.req = req
def data(self):
# If the flavor id cannot be cast to an int, we simply return
# no id and rely on str_id instead.
try:
f_id = int(self.flavor.id)
except ValueError:
f_id = None
flavor = {
'id': f_id,
'links': self._build_links(),
'name': self.flavor.name,
'ram': self.flavor.ram,
'str_id': str(self.flavor.id),
}
if not CONF.trove_volume_support and CONF.device_path is not None:
flavor['local_storage'] = self.flavor.ephemeral
return {"flavor": flavor}
def _build_links(self):
return create_links("flavors", self.req, self.flavor.id)
class FlavorsView(object):
view = FlavorView
def __init__(self, flavors, req=None):
self.flavors = flavors
self.req = req
def data(self):
data = []
for flavor in self.flavors:
data.append(self.view(flavor, req=self.req).data()['flavor'])
return {"flavors": data}
| apache-2.0 |
peplin/flask-jsonschema | setup.py | 2 | 1040 | """
Flask-JsonSchema
----------
A Flask extension for validating JSON requests with jsonschema
"""
from setuptools import setup
setup(
name='Flask-JsonSchema',
version='0.1.0',
url='https://github.com/mattupstate/flask-jsonschema',
license='MIT',
author='Matt Wright',
author_email='matt@nobien.net',
description='Flask extension for validating JSON requests',
long_description=__doc__,
py_modules=['flask_jsonschema'],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=['Flask>=0.9', 'jsonschema>=1.1.0'],
tests_require=['nose'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| mit |
guewen/odoo | addons/sale_crm/__init__.py | 353 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import sale_crm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cjaffar/jaffarchiosa | jaffarchiosa/lib/python2.7/site-packages/setuptools/py31compat.py | 570 | 1637 | import sys
import unittest
__all__ = ['get_config_vars', 'get_path']
try:
# Python 2.7 or >=3.2
from sysconfig import get_config_vars, get_path
except ImportError:
from distutils.sysconfig import get_config_vars, get_python_lib
def get_path(name):
if name not in ('platlib', 'purelib'):
raise ValueError("Name must be purelib or platlib")
return get_python_lib(name=='platlib')
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory(object):
""""
Very simple temporary directory context manager.
Will try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp()
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
except OSError:  # removal errors are not the only possible ones; ignore them
pass
self.name = None
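# Usage sketch (added comment; works with either implementation above and
# assumes "import os" at the call site):
#
#     with TemporaryDirectory() as tmpdir:
#         open(os.path.join(tmpdir, "scratch.txt"), "w").close()
#     # the directory is removed on exit; errors during removal are ignored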
unittest_main = unittest.main
_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2)
if _PY31:
# on Python 3.1, translate testRunner==None to TextTestRunner
# for compatibility with Python 2.6, 2.7, and 3.2+
def unittest_main(*args, **kwargs):
if 'testRunner' in kwargs and kwargs['testRunner'] is None:
kwargs['testRunner'] = unittest.TextTestRunner
return unittest.main(*args, **kwargs)
| mit |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/sqlalchemy/orm/strategies.py | 75 | 55884 | # orm/strategies.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from .. import exc as sa_exc, inspect
from .. import util, log, event
from ..sql import util as sql_util, visitors
from .. import sql
from . import (
attributes, interfaces, exc as orm_exc, loading,
unitofwork, util as orm_util
)
from .state import InstanceState
from .util import _none_set
from . import properties
from .interfaces import (
LoaderStrategy, StrategizedProperty
)
from .session import _state_session
import itertools
def _register_attribute(strategy, mapper, useobject,
compare_function=None,
typecallable=None,
uselist=False,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
prop = strategy.parent_property
attribute_ext = list(util.to_list(prop.extension, default=[]))
listen_hooks = []
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(desc,
prop.key, fn, **opts)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
backref = kw.pop('backref', None)
if backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(desc,
backref,
uselist)
)
for m in mapper.self_and_descendants:
if prop is m._props.get(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
extension=attribute_ext,
trackparent=useobject and (prop.single_parent
or prop.direction is interfaces.ONETOMANY),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent the a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
def __init__(self, parent):
super(UninstrumentedColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
def setup_query(self, context, entity, path, loadopt, adapter,
column_collection=None, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
return None, None, None
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
def __init__(self, parent):
super(ColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, 'composite_class')
def setup_query(self, context, entity, path, loadopt,
adapter, column_collection, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = self.parent_property.active_history or \
self.columns[0].primary_key or \
mapper.version_id_col in set(self.columns)
_register_attribute(self, mapper, useobject=False,
compare_function=coltype.compare_values,
active_history=active_history
)
def create_row_processor(self, context, path,
loadopt, mapper, row, adapter):
key = self.key
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
if col is not None and col in row:
def fetch_col(state, dict_, row):
dict_[key] = row[col]
return fetch_col, None, None
else:
def expire_for_non_present_col(state, dict_, row):
state._expire_attribute_pre_commit(dict_, key)
return expire_for_non_present_col, None, None
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
def __init__(self, parent):
super(DeferredColumnLoader, self).__init__(parent)
if hasattr(self.parent_property, 'composite_class'):
raise NotImplementedError("Deferred loading for composite "
"types not implemented yet")
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
col = self.columns[0]
if adapter:
col = adapter.columns[col]
key = self.key
if col in row:
return self.parent_property._get_strategy_by_cls(ColumnLoader).\
create_row_processor(
context, path, loadopt, mapper, row, adapter)
elif not self.is_class_level:
set_deferred_for_local_state = InstanceState._row_processor(
mapper.class_manager,
LoadDeferredColumns(key), key)
return set_deferred_for_local_state, None, None
else:
def reset_col_for_deferred(state, dict_, row):
# reset state on the key so that deferred callables
# fire off on next access.
state._reset(dict_, key)
return reset_col_for_deferred, None, None
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(self, mapper, useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
expire_missing=False
)
def setup_query(self, context, entity, path, loadopt, adapter,
only_load_props=None, **kwargs):
if (
loadopt and self.group and
loadopt.local_opts.get('undefer_group', False) == self.group
) or (only_load_props and self.key in only_load_props):
self.parent_property._get_strategy_by_cls(ColumnLoader).\
setup_query(context, entity,
path, loadopt, adapter, **kwargs)
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key for p in
localparent.iterate_properties
if isinstance(p, StrategizedProperty) and
isinstance(p.strategy, DeferredColumnLoader) and
p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
query = session.query(localparent)
if loading.load_on_ident(query, state.key,
only_load_props=group, refresh_state=state) is None:
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key):
self.key = key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
strategy = prop._strategies[DeferredColumnLoader]
return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
def __init__(self, parent):
super(AbstractRelationshipLoader, self).__init__(parent)
self.mapper = self.parent_property.mapper
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(self, mapper,
useobject=True,
uselist=self.parent_property.uselist,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
def invoke_no_load(state, dict_, row):
state._initialize(self.key)
return invoke_no_load, None, None
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
class LazyLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
def __init__(self, parent):
super(LazyLoader, self).__init__(parent)
join_condition = self.parent_property._join_condition
self._lazywhere, \
self._bind_to_col, \
self._equated_columns = join_condition.create_lazy_clause()
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns = join_condition.create_lazy_clause(
reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
self.use_get = not self.uselist and \
self.mapper._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info("%s will use query.get() to "
"optimize instance loads" % self)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history or
self.parent_property.direction is not interfaces.MANYTOONE or
not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(self,
mapper,
useobject=True,
callable_=self._load_for_state,
uselist=self.parent_property.uselist,
backref=self.parent_property.back_populates,
typecallable=self.parent_property.collection_class,
active_history=active_history
)
def lazy_clause(self, state, reverse_direction=False,
alias_secondary=False,
adapt_source=None,
passive=None):
if state is None:
return self._lazy_none_clause(
reverse_direction,
adapt_source=adapt_source)
if not reverse_direction:
criterion, bind_to_col, rev = \
self._lazywhere, \
self._bind_to_col, \
self._equated_columns
else:
criterion, bind_to_col, rev = \
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns
if reverse_direction:
mapper = self.parent_property.mapper
else:
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
# use the "committed state" only if we're in a flush
# for this state.
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = \
lambda: mapper._get_committed_state_attr_by_column(
state, dict_,
bind_to_col[bindparam._identifying_key])
else:
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = \
lambda: mapper._get_state_attr_by_column(
state, dict_,
bind_to_col[bindparam._identifying_key])
if self.parent_property.secondary is not None and alias_secondary:
criterion = sql_util.ClauseAdapter(
self.parent_property.secondary.alias()).\
traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam': visit_bindparam})
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col, rev = \
self._lazywhere, \
self._bind_to_col,\
self._equated_columns
else:
criterion, bind_to_col, rev = \
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns
criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _load_for_state(self, state, passive):
if not state.key and \
(
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
ident_key = None
if (
(not passive & attributes.SQL_OK and not self.use_get)
or
(not passive & attributes.NON_PERSISTENT_OK and pending)
):
return attributes.PASSIVE_NO_RESULT
session = _state_session(state)
if not session:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
ident = self._get_ident_for_use_get(
session,
state,
passive
)
if attributes.PASSIVE_NO_RESULT in ident:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in ident:
return attributes.NEVER_SET
if _none_set.issuperset(ident):
return None
ident_key = self.mapper.identity_key_from_primary_key(ident)
instance = loading.get_from_identity(session, ident_key, passive)
if instance is not None:
return instance
elif not passive & attributes.SQL_OK or \
not passive & attributes.RELATED_OBJECT_OK:
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(session, state, ident_key, passive)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(
state,
dict_,
self._equated_columns[pk],
passive=passive)
for pk in self.mapper.primary_key
]
@util.dependencies("sqlalchemy.orm.strategy_options")
def _emit_lazyload(self, strategy_options, session, state, ident_key, passive):
q = session.query(self.mapper)._adapt_all_clauses()
if self.parent_property.secondary is not None:
q = q.select_from(self.mapper, self.parent_property.secondary)
q = q._with_invoke_all_eagers(False)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q = q.autoflush(False)
if state.load_path:
q = q._with_current_path(state.load_path[self.parent_property])
if state.load_options:
q = q._conditional_options(*state.load_options)
if self.use_get:
return loading.load_on_ident(q, ident_key)
if self.parent_property.order_by:
q = q.order_by(*util.to_list(self.parent_property.order_by))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, LazyLoader):
q = q.options(strategy_options.Load(rev.parent).lazyload(rev.key))
lazy_clause = self.lazy_clause(state, passive=passive)
if pending:
bind_values = sql_util.bind_values(lazy_clause)
if None in bind_values:
return None
q = q.filter(lazy_clause)
result = q.all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
def create_row_processor(self, context, path, loadopt,
mapper, row, adapter):
key = self.key
if not self.is_class_level:
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = InstanceState._row_processor(
mapper.class_manager,
LoadLazyAttribute(key), key)
return set_lazy_callable, None, None
else:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
return reset_for_lazy_callable, None, None
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, key):
self.key = key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[LazyLoader]
return strategy._load_for_state(state, passive)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
init_class_attribute(mapper)
def setup_query(self, context, entity,
path, loadopt, adapter, column_collection=None,
parentmapper=None, **kwargs):
pass
def create_row_processor(self, context, path, loadopt,
mapper, row, adapter):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
return None, None, load_immediate
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
def __init__(self, parent):
super(SubqueryLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
init_class_attribute(mapper)
def setup_query(self, context, entity,
path, loadopt, adapter,
column_collection=None,
parentmapper=None, **kwargs):
if not context.query._enable_eagerloads:
return
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_info = path.get(context.attributes,
"path_with_polymorphic", None)
if with_poly_info is not None:
effective_entity = with_poly_info.entity
else:
effective_entity = self.mapper
subq_path = context.attributes.get(('subquery_path', None),
orm_util.PathRegistry.root)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
subq_mapper, leftmost_mapper, leftmost_attr, leftmost_relationship = \
self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader),
context.query)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query, leftmost_mapper,
leftmost_attr, leftmost_relationship,
entity.mapper
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
('subquery_path', None): subq_path
}
q = q._enable_single_crit(False)
to_join, local_attr, parent_alias = \
self._prep_for_joins(left_alias, subq_path)
q = q.order_by(*local_attr)
q = q.add_columns(*local_attr)
q = self._apply_joins(q, to_join, left_alias,
parent_alias, effective_entity)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if self.parent.isa(subq_mapper) and self.parent_property is subq_path[1]:
leftmost_mapper, leftmost_prop = \
self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = \
subq_mapper, \
subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
leftmost_mapper._columntoproperty[c].class_attribute
for c in leftmost_cols
]
return subq_mapper, leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(self,
orig_query, leftmost_mapper,
leftmost_attr, leftmost_relationship,
entity_mapper
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set a real "from" if not present, as this is more
# accurate than just going off of the column expression
if not q._from_obj and entity_mapper.isa(leftmost_mapper):
q._set_select_from([entity_mapper], False)
target_cols = q._adapt_col_list(leftmost_attr)
# select from the identity columns of the outer
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(leftmost_mapper, embed_q,
use_mapper_path=True)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) > 1:
info = inspect(to_join[-1][0])
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
elif info.mapper.isa(self.parent):
# In the case of multiple levels, retrieve
# it from subq_path[-2]. This is the same as self.parent
# in the vast majority of cases, and [ticket:2014]
# illustrates a case where sub_path[-2] is a subclass
# of self.parent
parent_alias = orm_util.AliasedClass(to_join[-1][0],
use_mapper_path=True)
else:
# if of_type() were used leading to this relationship,
# self.parent is more specific than subq_path[-2]
parent_alias = orm_util.AliasedClass(self.parent,
use_mapper_path=True)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(self, q, to_join, left_alias, parent_alias,
effective_entity):
for i, (mapper, key) in enumerate(to_join):
# we need to use query.join() as opposed to
# orm.join() here because of the
# rich behavior it brings when dealing with
# "with_polymorphic" mappers. "aliased"
# and "from_joinpoint" take care of most of
# the chaining and aliasing for us.
first = i == 0
middle = i < len(to_join) - 1
second_to_last = i == len(to_join) - 2
last = i == len(to_join) - 1
if first:
attr = getattr(left_alias, key)
if last and effective_entity is not self.mapper:
attr = attr.of_type(effective_entity)
else:
if last and effective_entity is not self.mapper:
attr = getattr(parent_alias, key).\
of_type(effective_entity)
else:
attr = key
if second_to_last:
q = q.join(parent_alias, attr, from_joinpoint=True)
else:
q = q.join(attr, aliased=middle, from_joinpoint=True)
return q
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = \
eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = dict(
(k, [vv[0] for vv in v])
for k, v in itertools.groupby(
self.subq,
lambda x: x[1:]
)
)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def create_row_processor(self, context, path, loadopt,
mapper, row, adapter):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self)
path = path[self.parent_property]
subq = path.get(context.attributes, 'subquery')
if subq is None:
return None, None, None
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, 'collections', collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
return self._create_collection_loader(collections, local_cols)
else:
return self._create_scalar_loader(collections, local_cols)
def _create_collection_loader(self, collections, local_cols):
def load_collection_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
()
)
state.get_impl(self.key).\
set_committed_value(state, dict_, collection)
return load_collection_from_subq, None, None, collections.loader
def _create_scalar_loader(self, collections, local_cols):
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
(None,)
)
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
scalar = collection[0]
state.get_impl(self.key).\
set_committed_value(state, dict_, scalar)
return load_scalar_from_subq, None, None, collections.loader
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
def __init__(self, parent):
super(JoinedLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).init_class_attribute(mapper)
def setup_query(self, context, entity, path, loadopt, adapter, \
column_collection=None, parentmapper=None,
**kwargs):
"""Add a left outer join to the statement thats being constructed."""
if not context.query._enable_eagerloads:
return
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
clauses, adapter, add_to_collection = \
self._setup_query_on_user_defined_adapter(
context, entity, path, adapter,
user_defined_adapter
)
else:
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
clauses, adapter, add_to_collection = self._generate_row_adapter(
context, entity, path, loadopt, adapter,
column_collection, parentmapper
)
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic",
None
)
if with_poly_info is not None:
with_polymorphic = with_poly_info.with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.mapper]
for value in self.mapper._iterate_polymorphic_properties(
mappers=with_polymorphic):
value.setup(
context,
entity,
path,
clauses,
parentmapper=self.mapper,
column_collection=add_to_collection)
if with_poly_info is not None and \
None in set(context.secondary_columns):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(self, loadopt, context):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(context.attributes,
"user_defined_eager_row_processor", False)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
#from .mapper import Mapper
#from .interfaces import MapperProperty
#assert isinstance(root_mapper, Mapper)
#assert isinstance(prop, MapperProperty)
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(alias,
equivalents=prop.mapper._equivalent_columns)
else:
if path.contains(context.attributes, "path_with_polymorphic"):
with_poly_info = path.get(context.attributes,
"path_with_polymorphic")
adapter = orm_util.ORMAdapter(
with_poly_info.entity,
equivalents=prop.mapper._equivalent_columns)
else:
adapter = context.query._polymorphic_adapters.get(prop.mapper, None)
path.set(context.attributes,
"user_defined_eager_row_processor",
adapter)
return adapter
def _setup_query_on_user_defined_adapter(self, context, entity,
path, adapter, user_defined_adapter):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context.query, context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(context.attributes, "user_defined_eager_row_processor",
user_defined_adapter)
elif adapter:
user_defined_adapter = adapter
path.set(context.attributes, "user_defined_eager_row_processor",
user_defined_adapter)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _generate_row_adapter(self,
context, entity, path, loadopt, adapter,
column_collection, parentmapper
):
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic",
None
)
if with_poly_info:
to_adapt = with_poly_info.entity
else:
to_adapt = orm_util.AliasedClass(self.mapper,
flat=True,
use_mapper_path=True)
clauses = orm_util.ORMAdapter(
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True)
assert clauses.aliased_class is not None
if self.parent_property.direction != interfaces.MANYTOONE:
context.multi_row_eager_loaders = True
innerjoin = (
loadopt.local_opts.get(
'innerjoin', self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
context.create_eager_joins.append(
(self._create_eager_join, context,
entity, path, adapter,
parentmapper, clauses, innerjoin)
)
add_to_collection = context.secondary_columns
path.set(context.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection
def _create_eager_join(self, context, entity,
path, adapter, parentmapper,
clauses, innerjoin):
if parentmapper is None:
localparent = entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = context.multi_row_eager_loaders and \
context.query._should_nest_selectable
entity_key = None
if entity not in context.eager_joins and \
not should_nest_selectable and \
context.from_clause:
index, clause = \
sql_util.find_join_source(
context.from_clause, entity.selectable)
if clause is not None:
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
entity_key, default_towrap = index, clause
if entity_key is None:
entity_key, default_towrap = entity, entity.selectable
towrap = context.eager_joins.setdefault(entity_key, default_towrap)
if adapter:
if getattr(adapter, 'aliased_class', None):
onclause = getattr(
adapter.aliased_class, self.key,
self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent,
adapter.selectable,
use_mapper_path=True
),
self.key, self.parent_property
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
join_to_outer = innerjoin and isinstance(towrap, sql.Join) and towrap.isouter
if join_to_outer and innerjoin == 'nested':
inner = orm_util.join(
towrap.right,
clauses.aliased_class,
onclause,
isouter=False
)
eagerjoin = orm_util.join(
towrap.left,
inner,
towrap.onclause,
isouter=True
)
eagerjoin._target_adapter = inner._target_adapter
else:
if join_to_outer:
innerjoin = False
eagerjoin = orm_util.join(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin
)
context.eager_joins[entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = entity.selectable
if self.parent_property.secondary is None and \
not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin):
if localparent.mapped_table.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += \
eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
def _create_eager_adapter(self, context, row, adapter, path, loadopt):
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
try:
self.mapper.identity_key_from_row(row, decorator)
return decorator
except KeyError:
            # no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self)
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context,
row,
adapter, our_path, loadopt)
if eager_adapter is not False:
key = self.key
_instance = loading.instance_processor(
self.mapper,
context,
our_path[self.mapper],
eager_adapter)
if not self.uselist:
return self._create_scalar_loader(context, key, _instance)
else:
return self._create_collection_loader(context, key, _instance)
else:
return self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
create_row_processor(
context, path, loadopt,
mapper, row, adapter)
def _create_collection_loader(self, context, key, _instance):
def load_collection_from_joined_new_row(state, dict_, row):
collection = attributes.init_state_collection(
state, dict_, key)
result_list = util.UniqueAppender(collection,
'append_without_event')
context.attributes[(state, key)] = result_list
_instance(row, result_list)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(state,
dict_, key)
result_list = util.UniqueAppender(
collection,
'append_without_event')
context.attributes[(state, key)] = result_list
_instance(row, result_list)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row, None)
return load_collection_from_joined_new_row, \
load_collection_from_joined_existing_row, \
None, load_collection_from_joined_exec
def _create_scalar_loader(self, context, key, _instance):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row, None)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row, None)
if existing is not None \
and key in dict_ \
and existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row, None)
return load_scalar_from_joined_new_row, \
load_scalar_from_joined_existing_row, \
None, load_scalar_from_joined_exec
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent." %
(orm_util.instance_str(value), state.class_, prop)
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(desc, 'append', append, raw=True, retval=True,
active_history=True)
event.listen(desc, 'set', set_, raw=True, retval=True,
active_history=True)
| gpl-2.0 |
acetcom/nextepc | lib/nas/5gs/support/cache/nas-msg-93.py | 2 | 1623 | ies = []
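# Descriptive note: each entry below describes one information element (IE)
# of a 5GS NAS message -- its IEI (tag), value name, IE type, spec section
# reference (these look like 3GPP TS 24.501 section numbers), presence
# (M = mandatory, O = optional), format (V/TV/LV/TLV/TLV-E) and length in octets.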
ies.append({ "iei" : "", "value" : "Selected NAS security algorithms", "type" : "security algorithms", "reference" : "9.11.3.34", "presence" : "M", "format" : "V", "length" : "1"})
ies.append({ "iei" : "", "value" : "ngKSI", "type" : "key set identifier", "reference" : "9.11.3.32", "presence" : "M", "format" : "V", "length" : "1/2"})
ies.append({ "iei" : "", "value" : "Replayed UE security capabilities", "type" : "UE security capability", "reference" : "9.11.3.54", "presence" : "M", "format" : "LV", "length" : "3-9"})
ies.append({ "iei" : "E-", "value" : "IMEISV request", "type" : "IMEISV request", "reference" : "9.11.3.28", "presence" : "O", "format" : "TV", "length" : "1"})
ies.append({ "iei" : "57", "value" : "Selected EPS NAS security algorithms", "type" : "EPS NAS security algorithms", "reference" : "9.11.3.25", "presence" : "O", "format" : "TV", "length" : "2"})
ies.append({ "iei" : "36", "value" : "Additional 5G security information", "type" : "Additional 5G security information", "reference" : "9.11.3.12", "presence" : "O", "format" : "TLV", "length" : "3"})
ies.append({ "iei" : "78", "value" : "EAP message", "type" : "EAP message", "reference" : "9.11.2.2", "presence" : "O", "format" : "TLV-E", "length" : "7-1503"})
ies.append({ "iei" : "38", "value" : "ABBA", "type" : "ABBA", "reference" : "9.11.3.10", "presence" : "O", "format" : "TLV", "length" : "4-n"})
ies.append({ "iei" : "19", "value" : "Replayed S1 UE security capabilities", "type" : "S1 UE security capability", "reference" : "9.11.3.48A", "presence" : "O", "format" : "TLV", "length" : "4-7"})
msg_list[key]["ies"] = ies
| agpl-3.0 |
boumenot/azure-linux-extensions | RDMAUpdate/setup.py | 6 | 5440 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
# To build:
# python setup.py sdist
#
# To install:
# python setup.py install
#
# To register (only needed once):
# python setup.py register
#
# To upload:
# python setup.py sdist upload
from distutils.core import setup
import os
import shutil
import tempfile
import json
import sys
import subprocess
from subprocess import call
from zipfile import ZipFile
from main.Common import CommonVariables
packages_array = []
main_folder = 'main'
main_entry = main_folder + '/handle.py'
packages_array.append(main_folder)
patch_folder = main_folder + '/patch'
packages_array.append(patch_folder)
"""
copy the dependencies locally
"""
"""
copy the utils lib locally
"""
target_utils_path = main_folder + '/' + CommonVariables.utils_path_name
#if os.path.isdir(target_utils_path):
# shutil.rmtree(target_utils_path)
#print('copying')
#shutil.copytree ('../' + CommonVariables.utils_path_name, target_utils_path)
#print('copying end')
packages_array.append(target_utils_path)
"""
generate the HandlerManifest.json file.
"""
manifest_obj = [{
"name": CommonVariables.extension_name,
"version": CommonVariables.extension_version,
"handlerManifest": {
"installCommand": main_entry + " -install",
"uninstallCommand": main_entry + " -uninstall",
"updateCommand": main_entry + " -update",
"enableCommand": main_entry + " -enable",
"disableCommand": main_entry + " -disable",
"rebootAfterInstall": False,
"reportHeartbeat": False
}
}]
manifest_str = json.dumps(manifest_obj, sort_keys = True, indent = 4)
manifest_file = open("HandlerManifest.json", "w")
manifest_file.write(manifest_str)
manifest_file.close()
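# The emitted HandlerManifest.json has roughly this shape (illustrative
# values only; the real ones come from CommonVariables):
# [{"name": "<extension>", "version": "<version>", "handlerManifest": {
#     "installCommand": "main/handle.py -install", ...,
#     "rebootAfterInstall": false, "reportHeartbeat": false}}]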
"""
generate the extension xml file
"""
extension_xml_file_content = """<ExtensionImage xmlns="http://schemas.microsoft.com/windowsazure">
<ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>
<Type>%s</Type>
<Version>%s</Version>
<Label>%s</Label>
<HostingResources>VmRole</HostingResources>
<MediaLink>%s</MediaLink>
<Description>%s</Description>
<IsInternalExtension>true</IsInternalExtension>
<Eula>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</Eula>
<PrivacyUri>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</PrivacyUri>
<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>
<IsJsonExtension>true</IsJsonExtension>
<CompanyName>Microsoft Open Source Technology Center</CompanyName>
</ExtensionImage>""" % (CommonVariables.extension_type,CommonVariables.extension_version,CommonVariables.extension_label,CommonVariables.extension_media_link,CommonVariables.extension_description)
extension_xml_file = open(CommonVariables.extension_name + '-' + str(CommonVariables.extension_version) + '.xml', 'w')
extension_xml_file.write(extension_xml_file_content)
extension_xml_file.close()
"""
setup script, to package the files up
"""
setup(name = CommonVariables.extension_name,
version = CommonVariables.extension_version,
description=CommonVariables.extension_description,
license='Apache License 2.0',
author='Microsoft Corporation',
author_email='andliu@microsoft.com',
url='https://github.com/Azure/azure-linux-extensions',
classifiers = ['Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: Apache Software License'],
packages = packages_array)
"""
unzip the package files and re-package it.
"""
target_zip_file_location = './dist/'
target_folder_name = CommonVariables.extension_name + '-' + str(CommonVariables.extension_version)
target_zip_file_path = target_zip_file_location + target_folder_name + '.zip'
target_zip_file = ZipFile(target_zip_file_path)
target_zip_file.extractall(target_zip_file_location)
def dos2unix(src):
args = ["dos2unix",src]
devnull = open(os.devnull, 'w')
child = subprocess.Popen(args, stdout=devnull, stderr=devnull)
    print('dos2unix %s' % src)
child.wait()
def zip(src, dst):
zf = ZipFile("%s" % (dst), "w")
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
dos2unix(absname)
arcname = absname[len(abs_src) + 1:]
            print('zipping %s as %s' % (os.path.join(dirname, filename), arcname))
zf.write(absname, arcname)
zf.close()
final_folder_path = target_zip_file_location + target_folder_name
zip(final_folder_path, target_zip_file_path)
| apache-2.0 |
imiolek-ireneusz/eduActiv8 | classes/updater.py | 1 | 1375 | # -*- coding: utf-8 -*-
import urllib
import socket
import threading
import classes.cversion
class Updater(threading.Thread):
def __init__(self, config, android):
self.config = config
self.android = android
if android is None:
threading.Thread.__init__(self)
@staticmethod
def internet(host="8.8.8.8", port=53, timeout=3):
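        # Reachability probe: a successful TCP connect to 8.8.8.8:53 (Google
        # public DNS) is taken to mean the machine has internet access.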
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except Exception as ex:
#print ex.message
return False
def check4updates(self):
if self.android is None:
from lxml import etree
url = "https://www.updates.eduactiv8.org/update.xml"
update = urllib.urlopen(url).read()
root = etree.XML(update)
version = root.find(".//v")
version_value = version.text
self.config.avail_version = version_value
if version_value != classes.cversion.ver:
self.config.update_available = True
else:
self.config.update_available = False
def run(self):
try:
if self.internet():
self.check4updates()
except:
pass
"""
update.xml
<vi>
<v>3.80.411</v>
</vi>
""" | gpl-3.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/nbconvert/preprocessors/tests/test_csshtmlheader.py | 27 | 1578 | """
Module with tests for the csshtmlheader preprocessor
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .base import PreprocessorTestsBase
from ..csshtmlheader import CSSHTMLHeaderPreprocessor
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
class TestCSSHTMLHeader(PreprocessorTestsBase):
"""Contains test functions for csshtmlheader.py"""
def build_preprocessor(self):
"""Make an instance of a preprocessor"""
preprocessor = CSSHTMLHeaderPreprocessor()
preprocessor.enabled = True
return preprocessor
def test_constructor(self):
"""Can a CSSHTMLHeaderPreprocessor be constructed?"""
self.build_preprocessor()
def test_output(self):
"""Test the output of the CSSHTMLHeaderPreprocessor"""
nb = self.build_notebook()
res = self.build_resources()
preprocessor = self.build_preprocessor()
nb, res = preprocessor(nb, res)
assert 'css' in res['inlining']
| bsd-3-clause |
indictranstech/Das_frappe | frappe/tests/test_translation.py | 56 | 2242 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, unittest, os
import frappe.translate
# class TestTranslations(unittest.TestCase):
# def test_doctype(self, messages=None):
# if not messages:
# messages = frappe.translate.get_messages_from_doctype("Role")
# self.assertTrue("Role Name" in messages)
#
# def test_page(self, messages=None):
# if not messages:
# messages = frappe.translate.get_messages_from_page("finder")
# self.assertTrue("Finder" in messages)
#
# def test_report(self, messages=None):
# if not messages:
# messages = frappe.translate.get_messages_from_report("ToDo")
# self.assertTrue("Test" in messages)
#
# def test_include_js(self, messages=None):
# if not messages:
# messages = frappe.translate.get_messages_from_include_files("frappe")
# self.assertTrue("History" in messages)
#
# def test_server(self, messages=None):
# if not messages:
# messages = frappe.translate.get_server_messages("frappe")
# self.assertTrue("Login" in messages)
# self.assertTrue("Did not save" in messages)
#
# def test_all_app(self):
# messages = frappe.translate.get_messages_for_app("frappe")
# self.test_doctype(messages)
# self.test_page(messages)
# self.test_report(messages)
# self.test_include_js(messages)
# self.test_server(messages)
#
# def test_load_translations(self):
# frappe.translate.clear_cache()
# self.assertFalse(frappe.cache().hget("lang_full_dict", "de"))
#
# langdict = frappe.translate.get_full_dict("de")
# self.assertEquals(langdict['Row'], 'Reihe')
#
# def test_write_csv(self):
# tpath = frappe.get_pymodule_path("frappe", "translations", "de.csv")
# if os.path.exists(tpath):
# os.remove(tpath)
# frappe.translate.write_translations_file("frappe", "de")
# self.assertTrue(os.path.exists(tpath))
# self.assertEquals(dict(frappe.translate.read_csv_file(tpath)).get("Row"), "Reihe")
#
# def test_get_dict(self):
# frappe.local.lang = "de"
# self.assertEquals(frappe.get_lang_dict("doctype", "Role").get("Role"), "Rolle")
# frappe.local.lang = "en"
#
# if __name__=="__main__":
# frappe.connect("site1")
# unittest.main()
| mit |
nuigroup/kivy | kivy/logger.py | 3 | 8818 | '''
Logger object
=============
Different levels are available: trace, debug, info, warning, error, critical.
Examples of usage::
from kivy.logger import Logger
    Logger.info('title: This is an info message')
    Logger.debug('title: This is a debug message')
    try:
        raise Exception('bleh')
    except Exception as e:
        Logger.exception(e)
The message passed to the logger is split at the first colon. The left part is
used as a title and the right part is used as the message. This way, you can
"categorize" your messages easily::
Logger.info('Application: This is a test')
# will appear as
[INFO ] [Application ] This is a test
Logger configuration
--------------------
The logger can be controlled in the Kivy configuration file::
[kivy]
log_level = info
log_enable = 1
log_dir = logs
log_name = kivy_%y-%m-%d_%_.txt
More information about the allowed values is available in the
:mod:`kivy.config` module.
Logger history
--------------
Even if the logger is not enabled, you can still access the history of the
latest 100 messages::
from kivy.logger import LoggerHistory
    print(LoggerHistory.history)
'''
import logging
import os
import sys
import kivy
from random import randint
from functools import partial
__all__ = ('Logger', 'LOG_LEVELS', 'COLORS', 'LoggerHistory')
Logger = None
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
previous_stderr = sys.stderr
def formatter_message(message, use_color=True):
if use_color:
message = message.replace("$RESET", RESET_SEQ)
message = message.replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'TRACE': MAGENTA,
'WARNING': YELLOW,
'INFO': GREEN,
'DEBUG': CYAN,
'CRITICAL': RED,
'ERROR': RED}
logging.TRACE = 9
LOG_LEVELS = {
'trace': logging.TRACE,
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
class FileHandler(logging.Handler):
history = []
filename = 'log.txt'
fd = None
def purge_logs(self, directory):
        '''Purge logs is called randomly to prevent the log directory from
        filling up with too many log files.
        Each call has a 1 in 20 chance of firing an actual purge.
'''
if randint(0, 20) != 0:
return
# Use config ?
maxfiles = 100
        print('Purge log fired. Analysing...')
join = os.path.join
unlink = os.unlink
# search all log files
l = map(lambda x: join(directory, x), os.listdir(directory))
if len(l) > maxfiles:
# get creation time on every files
l = zip(l, map(os.path.getctime, l))
# sort by date
l.sort(cmp=lambda x, y: cmp(x[1], y[1]))
# get the oldest (keep last maxfiles)
l = l[:-maxfiles]
            print('Purge %d log files' % len(l))
# now, unlink every files in the list
for filename in l:
unlink(filename[0])
        print('Purge finished!')
def _configure(self):
from time import strftime
from kivy.config import Config
log_dir = Config.get('kivy', 'log_dir')
log_name = Config.get('kivy', 'log_name')
_dir = kivy.kivy_home_dir
if len(log_dir) and log_dir[0] == '/':
_dir = log_dir
else:
_dir = os.path.join(_dir, log_dir)
if not os.path.exists(_dir):
os.mkdir(_dir)
self.purge_logs(_dir)
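        # '%_' in log_name is an incrementing-number placeholder: it is
        # swapped out before strftime so it survives date formatting, then
        # filled in below with the first index whose file does not yet exist.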
pattern = log_name.replace('%_', '@@NUMBER@@')
pattern = os.path.join(_dir, strftime(pattern))
n = 0
while True:
filename = pattern.replace('@@NUMBER@@', str(n))
if not os.path.exists(filename):
break
n += 1
            if n > 10000:  # guard against runaway log file creation
                raise Exception('Too many log files, remove them')
FileHandler.filename = filename
FileHandler.fd = open(filename, 'w')
Logger.info('Logger: Record log in %s' % filename)
def _write_message(self, record):
if FileHandler.fd in (None, False):
return
FileHandler.fd.write('[%-18s] ' % record.levelname)
try:
FileHandler.fd.write(record.msg)
except UnicodeEncodeError:
FileHandler.fd.write(record.msg.encode('utf8'))
FileHandler.fd.write('\n')
FileHandler.fd.flush()
def emit(self, message):
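        # Logger.logfile_activated is tri-state: None while startup/config is
        # still pending (buffer the message), False if file logging ended up
        # disabled (drop the buffer), and truthy once messages go to the file.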
# during the startup, store the message in the history
if Logger.logfile_activated is None:
FileHandler.history += [message]
return
# startup done, if the logfile is not activated, avoid history.
if Logger.logfile_activated is False:
FileHandler.history = []
return
if FileHandler.fd is None:
try:
self._configure()
except Exception:
# deactivate filehandler...
FileHandler.fd = False
Logger.exception('Error while activating FileHandler logger')
return
while FileHandler.history:
_message = FileHandler.history.pop()
self._write_message(_message)
self._write_message(message)
class LoggerHistory(logging.Handler):
history = []
def emit(self, message):
LoggerHistory.history = [message] + LoggerHistory.history[:100]
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
        # XXX Hack to avoid showing the traceback for the numeric handler;
        # lots of people have complained about it, so it is silenced here.
if 'Unable to load registered array format handler' in record.msg:
if record.args and record.args[0] == 'numeric':
return
try:
msg = record.msg.split(':', 1)
if len(msg) == 2:
record.msg = '[%-12s]%s' % (msg[0], msg[1])
except:
pass
levelname = record.levelname
if record.levelno == logging.TRACE:
levelname = 'TRACE'
record.levelname = levelname
if self.use_color and levelname in COLORS:
levelname_color = (
COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ)
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class ConsoleHandler(logging.StreamHandler):
def filter(self, record):
try:
msg = record.msg
k = msg.split(':', 1)
if k[0] == 'stderr' and len(k) == 2:
previous_stderr.write(k[1] + '\n')
return False
except:
pass
return True
class LogFile(object):
def __init__(self, channel, func):
self.buffer = ''
self.func = func
self.channel = channel
def write(self, s):
s = self.buffer + s
self.flush()
f = self.func
channel = self.channel
lines = s.split('\n')
for l in lines[:-1]:
f('%s: %s' % (channel, l))
self.buffer = lines[-1]
def flush(self):
return
def logger_config_update(section, key, value):
if LOG_LEVELS.get(value) is None:
        raise AttributeError('Loglevel {0!r} doesn\'t exist'.format(value))
Logger.setLevel(level=LOG_LEVELS.get(value))
#: Kivy default logger instance
Logger = logging.getLogger('kivy')
Logger.logfile_activated = None
Logger.trace = partial(Logger.log, logging.TRACE)
# add default kivy logger
Logger.addHandler(LoggerHistory())
if 'KIVY_NO_FILELOG' not in os.environ:
Logger.addHandler(FileHandler())
# Use the custom handler instead of streaming one.
if 'KIVY_NO_CONSOLELOG' not in os.environ:
if hasattr(sys, '_kivy_logging_handler'):
Logger.addHandler(getattr(sys, '_kivy_logging_handler'))
else:
use_color = os.name != 'nt'
if os.environ.get('KIVY_BUILD') in ('android', 'ios'):
use_color = False
color_fmt = formatter_message(
'[%(levelname)-18s] %(message)s', use_color)
formatter = ColoredFormatter(color_fmt, use_color=use_color)
console = ConsoleHandler()
console.setFormatter(formatter)
Logger.addHandler(console)
# install stderr handlers
sys.stderr = LogFile('stderr', Logger.warning)
#: Kivy history handler
LoggerHistory = LoggerHistory
| lgpl-3.0 |
rbramwell/pulp | server/test/unit/server/managers/repo/test_distributor.py | 6 | 23868 | import mock
from .... import base
from pulp.devel import mock_plugins
from pulp.plugins.config import PluginCallConfiguration
from pulp.server.db import model
from pulp.server.db.model.repository import RepoDistributor
import pulp.server.exceptions as exceptions
import pulp.server.managers.repo.distributor as distributor_manager
@mock.patch('pulp.server.managers.repo.distributor.model.Repository.objects')
class RepoDistributorManagerTests(base.PulpServerTests):
def setUp(self):
super(RepoDistributorManagerTests, self).setUp()
mock_plugins.install()
# Create the manager instance to test
self.distributor_manager = distributor_manager.RepoDistributorManager()
def tearDown(self):
super(RepoDistributorManagerTests, self).tearDown()
mock_plugins.reset()
def clean(self):
super(RepoDistributorManagerTests, self).clean()
mock_plugins.MOCK_DISTRIBUTOR.reset_mock()
model.Repository.drop_collection()
RepoDistributor.get_collection().remove()
def test_add_distributor(self, mock_repo_qs):
"""
Tests adding a distributor to a new repo.
"""
mock_repo = mock_repo_qs.get_repo_or_missing_resource()
config = {'key1': 'value1', 'key2': None}
added = self.distributor_manager.add_distributor('test_me', 'mock-distributor', config,
True, distributor_id='my_dist')
# Verify
expected_config = {'key1': 'value1'}
# Database
all_distributors = list(RepoDistributor.get_collection().find())
self.assertEqual(1, len(all_distributors))
self.assertEqual('my_dist', all_distributors[0]['id'])
self.assertEqual('mock-distributor', all_distributors[0]['distributor_type_id'])
self.assertEqual('test_me', all_distributors[0]['repo_id'])
self.assertEqual(expected_config, all_distributors[0]['config'])
self.assertTrue(all_distributors[0]['auto_publish'])
# Returned Value
self.assertEqual('my_dist', added['id'])
self.assertEqual('mock-distributor', added['distributor_type_id'])
self.assertEqual('test_me', added['repo_id'])
self.assertEqual(expected_config, added['config'])
self.assertTrue(added['auto_publish'])
# Plugin - Validate Config
self.assertEqual(1, mock_plugins.MOCK_DISTRIBUTOR.validate_config.call_count)
call_repo = mock_plugins.MOCK_DISTRIBUTOR.validate_config.call_args[0][0]
call_config = mock_plugins.MOCK_DISTRIBUTOR.validate_config.call_args[0][1]
self.assertTrue(call_repo is mock_repo.to_transfer_repo())
self.assertTrue(isinstance(call_config, PluginCallConfiguration))
self.assertTrue(call_config.plugin_config is not None)
self.assertEqual(call_config.repo_plugin_config, expected_config)
# Plugin - Distributor Added
self.assertEqual(1, mock_plugins.MOCK_DISTRIBUTOR.distributor_added.call_count)
        call_repo = mock_plugins.MOCK_DISTRIBUTOR.distributor_added.call_args[0][0]
        call_config = mock_plugins.MOCK_DISTRIBUTOR.distributor_added.call_args[0][1]
self.assertTrue(call_repo is mock_repo.to_transfer_repo())
self.assertTrue(isinstance(call_config, PluginCallConfiguration))
def test_add_distributor_multiple_distributors(self, mock_repo_qs):
"""
Tests adding a second distributor to a repository.
"""
self.distributor_manager.add_distributor(
'test_me', 'mock-distributor', {}, True, distributor_id='dist_1')
# Test
self.distributor_manager.add_distributor(
'test_me', 'mock-distributor-2', {}, True, distributor_id='dist_2')
# Verify
all_distributors = list(RepoDistributor.get_collection().find())
self.assertEqual(2, len(all_distributors))
dist_ids = [d['id'] for d in all_distributors]
self.assertTrue('dist_1' in dist_ids)
self.assertTrue('dist_2' in dist_ids)
def test_add_distributor_replace_existing(self, mock_repo_qs):
"""
Tests adding a distributor under the same ID as an existing distributor.
"""
self.distributor_manager.add_distributor(
'test_me', 'mock-distributor', {}, True, distributor_id='dist_1')
config = {'foo': 'bar'}
self.distributor_manager.add_distributor('test_me', 'mock-distributor', config, False,
distributor_id='dist_1')
# Database
all_distributors = list(RepoDistributor.get_collection().find())
self.assertEqual(1, len(all_distributors))
self.assertTrue(not all_distributors[0]['auto_publish'])
self.assertEqual(config, all_distributors[0]['config'])
# Plugin Calls
self.assertEqual(2, mock_plugins.MOCK_DISTRIBUTOR.distributor_added.call_count)
self.assertEqual(1, mock_plugins.MOCK_DISTRIBUTOR.distributor_removed.call_count)
def test_add_distributor_no_explicit_id(self, mock_repo_qs):
"""
Tests the ID generation when one is not specified for a distributor.
"""
added = self.distributor_manager.add_distributor('happy-repo', 'mock-distributor', {}, True)
# Verify
distributor = RepoDistributor.get_collection().find_one({'repo_id': 'happy-repo',
'id': added['id']})
self.assertTrue(distributor is not None)
def test_add_distributor_no_distributor(self, mock_repo_qs):
"""
Tests adding a distributor that doesn't exist.
"""
try:
self.distributor_manager.add_distributor('real-repo', 'fake-distributor', {}, True)
self.fail('No exception thrown for an invalid distributor type')
except exceptions.InvalidValue, e:
self.assertEqual(str(e), "Invalid properties: ['distributor_type_id']")
def test_add_distributor_invalid_id(self, mock_repo_qs):
"""
Tests adding a distributor with an invalid ID raises the correct error.
"""
bad_id = '!@#$%^&*()'
try:
self.distributor_manager.add_distributor('repo', 'mock-distributor', {}, True, bad_id)
self.fail('No exception thrown for an invalid distributor ID')
except exceptions.InvalidValue, e:
self.assertTrue('distributor_id' in e.property_names)
self.assertEqual(str(e), "Invalid properties: ['distributor_id']")
def test_add_distributor_initialize_raises_error(self, mock_repo_qs):
"""
        Tests the correct error is raised when the distributor raises an error during initialization.
"""
mock_plugins.MOCK_DISTRIBUTOR.distributor_added.side_effect = Exception()
try:
self.distributor_manager.add_distributor('repo', 'mock-distributor', {}, True)
            self.fail('Exception expected for error during initialization')
except exceptions.PulpExecutionException:
pass
# Cleanup
mock_plugins.MOCK_DISTRIBUTOR.distributor_added.side_effect = None
def test_add_distributor_validate_raises_error(self, mock_repo_qs):
"""
Tests the correct error is raised when the distributor raises an error during config
validation.
"""
mock_plugins.MOCK_DISTRIBUTOR.validate_config.side_effect = Exception()
try:
self.distributor_manager.add_distributor('rohan', 'mock-distributor', {}, True)
self.fail('Exception expected')
except Exception:
pass
def test_add_distributor_invalid_config(self, mock_repo_qs):
"""
Tests the correct error is raised when the distributor is handed an invalid configuration.
"""
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = (False, 'Invalid config')
try:
self.distributor_manager.add_distributor('error_repo', 'mock-distributor', {}, True)
self.fail('Exception expected for invalid configuration')
except exceptions.PulpDataException, e:
self.assertEqual(e[0], 'Invalid config')
# Cleanup
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = True
def test_add_distributor_invalid_config_backward_compatibility(self, mock_repo_qs):
"""
Tests the correct error is raised when the distributor is handed an invalid configuration.
"""
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = False
try:
self.distributor_manager.add_distributor('error_repo', 'mock-distributor', {}, True)
self.fail('Exception expected for invalid configuration')
except exceptions.PulpDataException:
pass
# Cleanup
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = True
@mock.patch(
'pulp.server.managers.schedule.repo.RepoPublishScheduleManager.delete_by_distributor_id')
def test_remove_distributor(self, mock_delete_schedules, mock_repo_qs):
"""
Tests removing an existing distributor from a repository.
"""
self.distributor_manager.add_distributor(
'dist-repo', 'mock-distributor', {}, True, distributor_id='doomed')
self.distributor_manager.remove_distributor('dist-repo', 'doomed')
# Verify
distributor = RepoDistributor.get_collection().find_one({'repo_id': 'dist-repo',
'id': 'doomed'})
self.assertTrue(distributor is None)
mock_delete_schedules.assert_called_once_with('dist-repo', 'doomed')
def test_remove_distributor_no_distributor(self, mock_repo_qs):
"""
        Tests that removing a distributor that doesn't exist raises MissingResource.
"""
        try:
            self.distributor_manager.remove_distributor('empty', 'non-existent')
            self.fail('Exception expected')
        except exceptions.MissingResource, e:
            self.assertTrue('non-existent' == e.resources['distributor'])
def test_update_distributor_config(self, mock_repo_qs):
"""
Tests the successful case of updating a distributor's config.
"""
config = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
distributor = self.distributor_manager.add_distributor('dawnstar', 'mock-distributor',
config, True)
dist_id = distributor['id']
# Test
delta_config = {'key1': 'updated1', 'key2': None}
self.distributor_manager.update_distributor_config('dawnstar', dist_id, delta_config)
# Verify
expected_config = {'key1': 'updated1', 'key3': 'value3'}
# Database
repo_dist = RepoDistributor.get_collection().find_one({'repo_id': 'dawnstar'})
self.assertEqual(repo_dist['config'], expected_config)
# Plugin
self.assertEqual(2, mock_plugins.MOCK_DISTRIBUTOR.validate_config.call_count)
call_config = mock_plugins.MOCK_DISTRIBUTOR.validate_config.call_args[0][1]
self.assertEqual(expected_config, call_config.repo_plugin_config)
def test_update_auto_publish(self, mock_repo_qs):
config = {'key': 'value'}
distributor = self.distributor_manager.add_distributor('test-repo', 'mock-distributor',
config, True)
# Test
self.distributor_manager.update_distributor_config('test-repo', distributor['id'], {},
False)
repo_dist = RepoDistributor.get_collection().find_one({'repo_id': 'test-repo'})
self.assertFalse(repo_dist['auto_publish'])
def test_update_invalid_auto_publish(self, mock_repo_qs):
config = {'key': 'value'}
distributor = self.distributor_manager.add_distributor(
'test-repo', 'mock-distributor', config, True)
# Test that an exception is raised if you hand update_distributor_config a bad auto_publish
self.assertRaises(
exceptions.InvalidValue, self.distributor_manager.update_distributor_config,
'test-repo', distributor['id'], {}, 'notbool')
def test_update_missing_distributor(self, mock_repo_qs):
"""
Tests updating the config on a distributor that doesn't exist on the repo.
"""
try:
self.distributor_manager.update_distributor_config('empty', 'missing', {})
self.fail('Exception expected')
except exceptions.MissingResource, e:
self.assertTrue('missing' == e.resources['distributor'])
def test_update_validate_exception(self, mock_repo_qs):
"""
Tests updating a config when the plugin raises an exception during validate.
"""
distributor = self.distributor_manager.add_distributor('elf', 'mock-distributor', {}, True)
dist_id = distributor['id']
class TestException(Exception):
pass
mock_plugins.MOCK_DISTRIBUTOR.validate_config.side_effect = TestException()
self.assertRaises(TestException, self.distributor_manager.update_distributor_config,
'elf', dist_id, {})
def test_update_invalid_config(self, mock_repo_qs):
"""
Tests updating a config when the plugin indicates the config is invalid.
"""
distributor = self.distributor_manager.add_distributor(
'dwarf', 'mock-distributor', {}, True)
dist_id = distributor['id']
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = (False, 'Invalid config')
# Test
try:
self.distributor_manager.update_distributor_config('dwarf', dist_id, {})
self.fail('Exception expected')
except exceptions.PulpDataException, e:
self.assertEqual(e[0], 'Invalid config')
# Cleanup
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = True
def test_update_invalid_config_backward_compatibility(self, mock_repo_qs):
"""
Tests updating a config when the plugin indicates the config is invalid.
"""
distributor = self.distributor_manager.add_distributor(
'dwarf', 'mock-distributor', {}, True)
dist_id = distributor['id']
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = False
# Test
try:
self.distributor_manager.update_distributor_config('dwarf', dist_id, {})
self.fail('Exception expected')
except exceptions.PulpDataException:
pass
# Cleanup
mock_plugins.MOCK_DISTRIBUTOR.validate_config.return_value = True
def test_create_bind_payload(self, mock_repo_qs):
# Setup
repo_id = 'repo-a'
distributor_id = 'dist-1'
binding_config = {'a': 'a'}
mock_repo = mock_repo_qs.get_repo_or_missing_resource.return_value
self.distributor_manager.add_distributor('repo-a', 'mock-distributor', {}, True,
distributor_id=distributor_id)
expected_payload = {'payload': 'stuff'}
mock_plugins.MOCK_DISTRIBUTOR.create_consumer_payload.return_value = expected_payload
# Test
payload = self.distributor_manager.create_bind_payload(repo_id, distributor_id,
binding_config)
# Verify
self.assertEqual(payload, expected_payload)
call_args = mock_plugins.MOCK_DISTRIBUTOR.create_consumer_payload.call_args[0]
self.assertEqual(call_args[0].id, mock_repo.to_transfer_repo().id)
self.assertTrue(isinstance(call_args[1], PluginCallConfiguration))
self.assertEqual(call_args[2], binding_config)
# Cleanup
mock_plugins.MOCK_DISTRIBUTOR.create_consumer_payload.return_value = None
def test_create_bind_payload_distributor_error(self, mock_repo_qs):
self.distributor_manager.add_distributor('repo-a', 'mock-distributor', {}, True,
distributor_id='dist-1')
        # This module does some very strange things with mock, and this Exception side effect
        # was causing unrelated tests to fail. Unfortunately, a with statement does not work
        # here because clean() calls reset_mock(), so the side effect is saved and restored
        # manually instead.
original_side_effect = mock_plugins.MOCK_DISTRIBUTOR.create_consumer_payload.side_effect
try:
mock_plugins.MOCK_DISTRIBUTOR.create_consumer_payload.side_effect = Exception()
# Test
self.assertRaises(
exceptions.PulpExecutionException,
self.distributor_manager.create_bind_payload, 'repo-a', 'dist-1', 'config')
finally:
mock_plugins.MOCK_DISTRIBUTOR.create_consumer_payload.side_effect = original_side_effect
def test_get_distributor(self, mock_repo_qs):
"""
Tests the successful case of getting a repo distributor.
"""
distributor_config = {'element': 'fire'}
self.distributor_manager.add_distributor('fire', 'mock-distributor', distributor_config,
True, distributor_id='flame')
# Test
distributor = self.distributor_manager.get_distributor('fire', 'flame')
# Verify
self.assertTrue(distributor is not None)
self.assertEqual(distributor['id'], 'flame')
self.assertEqual(distributor['repo_id'], 'fire')
self.assertEqual(distributor['config'], distributor_config)
def test_get_distributor_missing_distributor(self, mock_repo_qs):
"""
Tests the case of getting a distributor that doesn't exist on a valid repo.
"""
try:
self.distributor_manager.get_distributor('empty', 'irrelevant')
self.fail('Exception expected')
except exceptions.MissingResource, e:
self.assertTrue('irrelevant' == e.resources['distributor'])
def test_get_distributors(self, mock_repo_qs):
"""
Tests getting all distributors in the normal successful case.
"""
distributor_config = {'element': 'ice'}
self.distributor_manager.add_distributor(
'ice', 'mock-distributor', distributor_config, True, distributor_id='snowball-1')
self.distributor_manager.add_distributor(
'ice', 'mock-distributor', distributor_config, True, distributor_id='snowball-2')
# Test
distributors = self.distributor_manager.get_distributors('ice')
# Verify
self.assertTrue(distributors is not None)
self.assertEqual(2, len(distributors))
def test_get_distributors_none(self, mock_repo_qs):
"""
Tests an empty list is returned when none are present on the repo.
"""
distributors = self.distributor_manager.get_distributors('empty')
self.assertTrue(distributors is not None)
self.assertEqual(0, len(distributors))
def test_get_set_distributor_scratchpad(self, mock_repo_qs):
"""
Tests the retrieval and setting of a repo distributor's scratchpad.
"""
self.distributor_manager.add_distributor(
'repo', 'mock-distributor', {}, True, distributor_id='dist')
# Test - Unset Scratchpad
scratchpad = self.distributor_manager.get_distributor_scratchpad('repo', 'dist')
self.assertTrue(scratchpad is None)
# Test - Set
contents = 'gnomish mines'
self.distributor_manager.set_distributor_scratchpad('repo', 'dist', contents)
# Test - Get
scratchpad = self.distributor_manager.get_distributor_scratchpad('repo', 'dist')
self.assertEqual(contents, scratchpad)
def test_get_set_distributor_scratchpad_missing(self, mock_repo_qs):
"""
        Tests that no error is raised when getting or setting the scratchpad for a missing repo or distributor.
"""
scratchpad = self.distributor_manager.get_distributor_scratchpad('empty', 'not_there')
self.assertTrue(scratchpad is None)
# Test - Set No Distributor
self.distributor_manager.set_distributor_scratchpad('empty', 'fake_distributor', 'stuff')
# Test - Set No Repo
self.distributor_manager.set_distributor_scratchpad('fake', 'irrelevant', 'blah')
def test_publish_schedule(self, mock_repo_qs):
# setup
repo_id = 'scheduled_repo'
distributor_type_id = 'mock-distributor'
distributor_id = 'scheduled_repo_distributor'
schedule_id = 'scheduled_repo_publish'
self.distributor_manager.add_distributor(
repo_id, distributor_type_id, {}, False, distributor_id=distributor_id)
# pre-condition
self.assertEqual(
len(self.distributor_manager.list_publish_schedules(repo_id, distributor_id)), 0)
# add the schedule
self.distributor_manager.add_publish_schedule(repo_id, distributor_id, schedule_id)
self.assertTrue(
schedule_id in self.distributor_manager.list_publish_schedules(repo_id, distributor_id))
self.assertEqual(
len(self.distributor_manager.list_publish_schedules(repo_id, distributor_id)), 1)
# idempotent add
self.distributor_manager.add_publish_schedule(repo_id, distributor_id, schedule_id)
self.assertEqual(
len(self.distributor_manager.list_publish_schedules(repo_id, distributor_id)), 1)
# remove the schedule
self.distributor_manager.remove_publish_schedule(repo_id, distributor_id, schedule_id)
self.assertFalse(
schedule_id in self.distributor_manager.list_publish_schedules(repo_id, distributor_id))
self.assertEqual(
len(self.distributor_manager.list_publish_schedules(repo_id, distributor_id)), 0)
# idempotent remove
self.distributor_manager.remove_publish_schedule(repo_id, distributor_id, schedule_id)
self.assertEqual(
len(self.distributor_manager.list_publish_schedules(repo_id, distributor_id)), 0)
# errors
self.distributor_manager.remove_distributor(repo_id, distributor_id)
self.assertRaises(exceptions.MissingResource,
self.distributor_manager.add_publish_schedule,
repo_id, distributor_id, schedule_id)
self.assertRaises(exceptions.MissingResource,
self.distributor_manager.remove_publish_schedule,
repo_id, distributor_id, schedule_id)
@mock.patch.object(RepoDistributor, 'get_collection')
def test_find_by_repo_list(self, mock_get_collection, mock_repo_qs):
EXPECT = {'repo_id': {'$in': ['repo-1']}}
PROJECTION = {'scratchpad': 0}
self.distributor_manager.find_by_repo_list(['repo-1'])
self.assertTrue(mock_get_collection.return_value.find.called)
mock_get_collection.return_value.find.assert_called_once_with(EXPECT, PROJECTION)
@mock.patch.object(RepoDistributor, 'get_collection')
def test_find_by_criteria(self, get_collection, mock_repo_qs):
criteria = mock.Mock()
collection = mock.Mock()
get_collection.return_value = collection
# test
result = self.distributor_manager.find_by_criteria(criteria)
# validation
get_collection.assert_called_once_with()
collection.query.assert_called_once_with(criteria)
self.assertEqual(result, collection.query.return_value)
| gpl-2.0 |
katerina7479/python-units-of-measure | pyuom/Area.py | 2 | 3144 | from PhysicalQuantity import PhysicalQuantity
from UnitOfMeasure import UnitOfMeasure
class Area(PhysicalQuantity):
def __init__(self, value, unit):
super(Area, self).__init__(value, unit)
# Meters squared
new_unit = UnitOfMeasure(
'm^2',
lambda x: x,
lambda y: y
)
new_unit.addAlias('m²')
new_unit.addAlias('meter squared')
new_unit.addAlias('meters squared')
self.registerUnitOfMeasure(new_unit)
# Millimeter squared
new_unit = UnitOfMeasure(
'mm^2',
lambda x: x / 1e-6,
lambda y: y * 1e-6,
)
new_unit.addAlias('mm²')
new_unit.addAlias('millimeter squared')
new_unit.addAlias('millimeters squared')
self.registerUnitOfMeasure(new_unit)
# Centimeter squared
new_unit = UnitOfMeasure(
'cm^2',
lambda x: x / 1e-4,
lambda y: y * 1e-4
)
new_unit.addAlias('cm²')
new_unit.addAlias('centimeter squared')
new_unit.addAlias('centimeters squared')
self.registerUnitOfMeasure(new_unit)
# Decimeter squared
new_unit = UnitOfMeasure(
'dm^2',
lambda x: x / 1e-2,
lambda y: y * 1e-2
)
new_unit.addAlias('dm²')
new_unit.addAlias('decimeter squared')
new_unit.addAlias('decimeters squared')
self.registerUnitOfMeasure(new_unit)
# Kilometer squared
new_unit = UnitOfMeasure(
'km^2',
lambda x: x / 1e6,
lambda y: y * 1e6
)
new_unit.addAlias('km²')
new_unit.addAlias('kilometer squared')
new_unit.addAlias('kilometers squared')
self.registerUnitOfMeasure(new_unit)
# Foot squared
new_unit = UnitOfMeasure(
'ft^2',
lambda x: x / 9.290304e-2,
lambda y: y * 9.290304e-2
)
new_unit.addAlias('ft²')
new_unit.addAlias('foot squared')
new_unit.addAlias('feet squared')
self.registerUnitOfMeasure(new_unit)
# Inch squared
new_unit = UnitOfMeasure(
'in^2',
lambda x: x / 6.4516e-4,
lambda y: y * 6.4516e-4
)
new_unit.addAlias('in²')
new_unit.addAlias('inch squared')
new_unit.addAlias('inches squared')
self.registerUnitOfMeasure(new_unit)
# Mile squared
new_unit = UnitOfMeasure(
'mi^2',
lambda x: x / 2.589988e6,
lambda y: y * 2.589988e6
)
new_unit.addAlias('mi²')
new_unit.addAlias('mile squared')
new_unit.addAlias('miles squared')
self.registerUnitOfMeasure(new_unit)
# Yard squared
new_unit = UnitOfMeasure(
'yd^2',
lambda x: x / 8.361274e-1,
lambda y: y * 8.361274e-1
)
new_unit.addAlias('yd²')
new_unit.addAlias('yard squared')
new_unit.addAlias('yards squared')
self.registerUnitOfMeasure(new_unit)
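# A minimal usage sketch, assuming the conversion entry point lives on
# PhysicalQuantity (the method name below is hypothetical, for
# illustration only):
#
#     a = Area(2.5, 'm^2')
#     a.convertTo('ft^2')    # ~26.91, since 1 m^2 == 1 / 9.290304e-2 ft^2
#
# Each registration supplies the base-to-unit lambda first (divide by the
# unit's size in square meters) and the unit-to-base lambda second.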
| mit |
ddico/rma | crm_rma_claim_make_claim/models/__init__.py | 4 | 1040 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2015 Vauxoo
# Author : Yanina Aular <yani@vauxoo.com>
# Osval Reyes <osval@vauxoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import claim_line
from . import crm_claim
| agpl-3.0 |
julian-seward1/servo | tests/wpt/css-tests/css21_dev/html4/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
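# 0xE000 is the first codepoint of the Unicode Private Use Area, so the
# generated test glyphs never collide with real characters.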
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
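# Each non-comment line of gsubtest-features.txt is tab-separated; only
# the leading OpenType feature tag is kept.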
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
    mapping.append(tag)
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
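    # Append a Type 2 charstring for glyphName and register the glyph in
    # the CFF charset and CharStrings mapping; its glyph ID is the next
    # charset index.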
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
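        # The single substitution swaps each feature's .pass and .fail
        # glyphs, so enabling the feature flips which codepoint renders
        # as the PASS outline.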
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
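        # With lookup type 3, alternate index N yields the PASS outline
        # only for the .altN glyph; .default fails for every alternate,
        # matching the table documented in makeJavascriptData.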
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |